Searched refs:partial (Results 51 - 75 of 137) sorted by relevance

/linux-master/lib/
fortify_kunit.c
660 int i, partial; (local)
663 partial = sizeof(src) / 2 - 1;
664 for (i = 0; i < partial; i++)
675 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
679 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
733 int i, partial; (local)
737 partial = sizeof(src) / 2 - 1;
738 for (i = 0; i < partial; i++)
749 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
753 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial *
[all...]
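
A note on what those strlcat() assertions rely on: the kernel's strlcat() returns strlen(dest) + strlen(src), i.e. the length of the string it tried to build, whether or not the copy was truncated, which is why the test can compare the return value against partial. A minimal user-space sketch of that contract (my_strlcat is a simplified stand-in, not the kernel code):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in with the kernel strlcat() return contract:
     * report strlen(dest) + strlen(src) even when truncating. */
    static size_t my_strlcat(char *dest, const char *src, size_t count)
    {
        size_t dsize = strlen(dest);
        size_t len = strlen(src);
        size_t res = dsize + len;

        dest += dsize;
        count -= dsize;
        if (len >= count)
            len = count - 1;
        memcpy(dest, src, len);
        dest[len] = '\0';
        return res;
    }

    int main(void)
    {
        char buf[8] = "ab";
        /* Tried to build "ab" + "0123456789" = 12 chars; only 7 fit. */
        assert(my_strlcat(buf, "0123456789", sizeof(buf)) == 12);
        assert(strcmp(buf, "ab01234") == 0);
        puts(buf);
        return 0;
    }
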
test_firmware.c
62 * @partial: partial read opt when calling request_firmware_into_buf
105 bool partial; (member in struct test_config)
235 test_fw_config->partial = false;
313 "partial:\t\t%s\n",
314 test_fw_config->partial ? "true" : "false");
599 &test_fw_config->partial);
606 return test_dev_config_show_bool(buf, test_fw_config->partial);
873 if (test_fw_config->partial)
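
The partial flag selects the partial-read variant of request_firmware_into_buf(). A hedged call-site sketch of that variant (the firmware name, window size, and offset below are hypothetical; check include/linux/firmware.h for the exact prototype):

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/sizes.h>

    /* Hypothetical: read a 4 KiB window of "test-firmware.bin" starting
     * at byte offset 512 into a caller-supplied, pre-allocated buffer. */
    static int load_fw_window(struct device *dev, void *buf)
    {
        const struct firmware *fw;
        int ret;

        ret = request_partial_firmware_into_buf(&fw, "test-firmware.bin",
                                                dev, buf, SZ_4K, 512);
        if (ret)
            return ret;

        dev_info(dev, "read %zu bytes\n", fw->size);
        release_firmware(fw);
        return 0;
    }
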
/linux-master/arch/mips/kernel/
entry.S
97 restore_partial: # restore partial frame
/linux-master/net/xfrm/
espintcp.c
262 struct espintcp_msg *emsg = &ctx->partial;
291 struct espintcp_msg *emsg = &ctx->partial;
326 struct espintcp_msg *emsg = &ctx->partial;
530 struct espintcp_msg *emsg = &ctx->partial;
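
All four hits read the same field: espintcp parks the one message that is still arriving in ctx->partial and resumes filling it on the next TCP read. A generic sketch of that accumulate-until-complete pattern (the struct and names are illustrative, not the xfrm types):

    #include <stddef.h>
    #include <string.h>

    /* Illustrative partial-message state: 'len' is the expected total,
     * 'off' how many bytes have arrived so far. */
    struct partial_msg {
        char   buf[4096];
        size_t len;
        size_t off;
    };

    /* Feed freshly received bytes; returns 1 once the message is whole. */
    static int msg_feed(struct partial_msg *m, const char *data, size_t n)
    {
        size_t want = m->len - m->off;

        if (n > want)
            n = want;   /* excess belongs to the next message */
        memcpy(m->buf + m->off, data, n);
        m->off += n;
        return m->off == m->len;
    }
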
/linux-master/drivers/crypto/intel/qat/qat_common/
icp_qat_fw_la.h
84 ciph_iv, ciphcfg, partial) \
103 ((partial & QAT_LA_PARTIAL_MASK) << \
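
The hit lands inside a macro that masks a "partial" state value and shifts it into a LA command flags word. The idiom, reduced to a compilable toy (the mask and shift values are invented, not QAT's):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: 2-bit "partial" field at bits 3..4. */
    #define LA_PARTIAL_MASK   0x3
    #define LA_PARTIAL_SHIFT  3
    #define LA_FLAGS_BUILD(partial) \
        (((partial) & LA_PARTIAL_MASK) << LA_PARTIAL_SHIFT)

    int main(void)
    {
        uint16_t flags = LA_FLAGS_BUILD(2); /* e.g. middle of a chain */

        printf("flags = 0x%04x\n", flags);  /* 0x0010 */
        return 0;
    }
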
/linux-master/fs/ocfs2/
move_extents.c
37 int partial; (member in struct ocfs2_move_extents_context)
192 * XXX: defrag can end up with finishing partial extent as requested,
198 int ret, credits = 0, extra_blocks = 0, partial = context->partial; (local)
287 * allowing partial extent moving is kind of 'pros and cons', it makes
294 if (!partial) {
310 if (partial && (new_len != *len))
1039 context->partial = 1;
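
The thread through these hits: when partial moves are permitted and the defrag relocated fewer blocks than requested (new_len != *len), the operation is reported as a partial success rather than a failure. Roughly (a condensed sketch, not the ocfs2 logic verbatim):

    /* Illustrative outcome check after asking to move 'requested' blocks. */
    static int move_result(int partial_allowed, unsigned int moved,
                           unsigned int requested)
    {
        if (moved == requested)
            return 0;   /* complete */
        if (partial_allowed && moved)
            return 1;   /* partial: caller may retry the remainder */
        return -1;      /* treated as failure */
    }
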
/linux-master/net/ipv4/
udp_offload.c
30 __wsum partial; (local)
36 /* Adjust partial header checksum to negate old length.
43 partial = (__force __wsum)uh->len;
45 partial = (__force __wsum)htonl(skb->len);
46 partial = csum_sub(csum_unfold(uh->check), partial);
121 /* If we are only performing partial GSO the inner header
136 uh->check = ~csum_fold(csum_add(partial,
305 /* GSO partial and frag_list segmentation only requires splitting
347 /* last packet can be partial gso_size
[all...]
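
The snippet is the standard 1's-complement trick: unfold the old checksum, subtract the stale length field, and later add the new one back, instead of re-summing the packet. A user-space sketch of that adjust-in-place arithmetic (the helpers below are simplified stand-ins for the kernel's csum_unfold/csum_sub/csum_add/csum_fold):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_add32(uint32_t a, uint32_t b)
    {
        uint32_t s = a + b;
        return s + (s < b);          /* end-around carry */
    }

    static uint32_t csum_sub32(uint32_t a, uint32_t b)
    {
        return csum_add32(a, ~b);    /* subtract = add the complement */
    }

    static uint16_t csum_fold16(uint32_t s)
    {
        s = (s & 0xffff) + (s >> 16);
        s = (s & 0xffff) + (s >> 16);
        return (uint16_t)~s;
    }

    int main(void)
    {
        /* Pretend the folded checksum covered old_len; swap in new_len
         * without touching the rest of the sum. */
        uint32_t old_len = 0x0528, new_len = 0x04f0;
        uint16_t check = csum_fold16(old_len);       /* header value */

        uint32_t partial = csum_sub32((uint16_t)~check, old_len);
        uint16_t new_check = csum_fold16(csum_add32(partial, new_len));

        printf("old 0x%04x -> new 0x%04x\n", check, new_check);
        return 0;
    }
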
/linux-master/drivers/gpu/drm/i915/gem/selftests/
i915_gem_mman.c
131 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
136 n = page - view.partial.offset;
137 GEM_BUG_ON(n >= view.partial.size);
142 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
163 view.partial.offset,
164 view.partial.size,
223 GEM_BUG_ON(view.partial.size > nreal);
228 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
233 n = page - view.partial.offset;
234 GEM_BUG_ON(n >= view.partial.size);
[all...]
/linux-master/drivers/gpu/drm/tiny/
repaper.c
93 bool partial; (member in struct repaper_epd)
526 /* repaper can't do partial updates */
553 if (epd->partial) {
564 epd->partial = true;
579 epd->partial = true;
769 epd->partial = false;
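
The pattern here: the first flush after power-on must be a full refresh; every later flush may be a partial (delta) update, so the flag is set after the first frame and cleared again on power-off. A compact sketch of that state machine (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct epd_state {
        bool partial; /* false until one full frame has been displayed */
    };

    static void epd_flush(struct epd_state *epd)
    {
        if (epd->partial)
            puts("delta update: send only changed lines");
        else
            puts("full refresh: send entire frame");
        epd->partial = true;  /* subsequent flushes may be partial */
    }

    int main(void)
    {
        struct epd_state epd = { .partial = false }; /* as after power-on */
        epd_flush(&epd); /* full refresh */
        epd_flush(&epd); /* delta update */
        return 0;
    }
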
/linux-master/fs/cramfs/
inode.c
332 u32 partial, last_page, blockaddr, *blockptrs; (local)
335 partial = offset_in_page(inode->i_size);
336 if (!partial)
342 tail_data = sbi->linear_virt_addr + blockaddr + partial;
343 return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
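
offset_in_page() is just size & (PAGE_SIZE - 1); the code checks that the slack between end-of-file and end-of-page is all zeroes. A quick stand-alone version of that check (memchr_inv() is re-implemented here for user space):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define offset_in_page(n) ((n) & (PAGE_SIZE - 1))

    /* User-space stand-in for the kernel's memchr_inv(): returns a
     * pointer to the first byte that is NOT 'c', or NULL if all match. */
    static const void *memchr_inv(const void *p, int c, size_t n)
    {
        const unsigned char *s = p;

        while (n--) {
            if (*s != (unsigned char)c)
                return s;
            s++;
        }
        return NULL;
    }

    int main(void)
    {
        static unsigned char page[PAGE_SIZE];    /* zero-filled */
        size_t i_size = 5000;                    /* file size in bytes */
        size_t partial = offset_in_page(i_size); /* 904 used in last page */

        /* Non-NULL would mean stale data hiding past EOF in the page. */
        printf("tail is %s\n",
               memchr_inv(page + partial, 0, PAGE_SIZE - partial) ?
               "dirty" : "clean");
        return 0;
    }
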
/linux-master/drivers/soc/fsl/qbman/
bman.c
297 struct bm_rcr_entry *partial = rcr->cursor + 1; (local)
299 rcr->cursor = rcr_carryclear(partial);
300 if (partial != rcr->cursor)
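
The idiom: advance the cursor, then mask off the carry out of the ring-index bits so the pointer wraps to the ring base; if the masked result differs from cursor + 1, a wrap happened. The kernel does this on address bits directly; the same arithmetic on an index, with a made-up ring size:

    #include <stdio.h>

    #define RING_SIZE 8  /* power of two; hypothetical */

    struct ring {
        int entries[RING_SIZE];
        unsigned int cursor;
    };

    static void ring_advance(struct ring *r)
    {
        unsigned int next = r->cursor + 1;

        r->cursor = next & (RING_SIZE - 1);   /* "carry-clear" */
        if (next != r->cursor)
            puts("wrapped to ring base");
    }

    int main(void)
    {
        struct ring r = { .cursor = RING_SIZE - 1 };

        ring_advance(&r);                  /* prints: wrapped to ring base */
        printf("cursor = %u\n", r.cursor); /* 0 */
        return 0;
    }
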
/linux-master/arch/alpha/lib/
ev6-memset.S
81 ldq_u $4,0($16) # L : Fetch first partial
100 * one partial quad to write.
259 ldq_u $4,0($16) # L : Fetch first partial
278 * one partial quad to write.
447 ldq_u $4,0($16) # L : Fetch first partial
466 * one partial quad to write.
memchr.S
121 # last quad may or may not be partial).
ev6-memchr.S
140 # last quad may or may not be partial).
stxncpy.S
78 /* Take care of the final (partial) word store. At this point
94 /* We're doing a partial word store and so need to combine
133 /* We are co-aligned; take care of a partial first word. */
175 cmpbge zero, t2, t8 # e0 : find nulls in second partial
222 If it resides in the lower half, we have one (probably partial)
224 have one full and one partial word left to write out.
231 or t0, t1, t0 # e0 : first (partial) source word complete
247 /* Take care of a final (probably partial) result word.
326 /* At this point we've found a zero in the first partial word of
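
All of these Alpha routines wrestle with the same problem: a byte range rarely starts or ends on an 8-byte boundary, so the first and last quadwords must be read, partially overwritten, and written back. A C rendering of that head-word merge (purely illustrative of the technique, not a translation of the assembly):

    #include <stdint.h>
    #include <stdio.h>

    /* Merge 'n' fill bytes into a 64-bit word starting at byte 'off':
     * the read-modify-write a partial quad store forces. Byte lanes
     * are numbered little-endian, as on Alpha. */
    static uint64_t merge_partial(uint64_t old, int off, int n, uint8_t fill)
    {
        uint64_t mask = 0;
        uint64_t fill64 = fill * 0x0101010101010101ull;

        for (int i = off; i < off + n && i < 8; i++)
            mask |= 0xffull << (8 * i);
        return (old & ~mask) | (fill64 & mask);
    }

    int main(void)
    {
        uint64_t quad = 0x1122334455667788ull;

        /* Set 3 bytes starting at byte offset 2 to 0xAA. */
        printf("0x%016llx\n",
               (unsigned long long)merge_partial(quad, 2, 3, 0xaa));
        return 0;
    }
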
/linux-master/fs/ext4/
move_extent.c
177 int i, err, nr = 0, partial = 0; (local)
195 partial = 1;
228 if (!partial)
extents_status.c
1057 bool partial; (member in struct rsvd_count)
1083 * the region to be removed, if any, and note that there's no partial
1096 rc->partial = false;
1111 * Tracks partial clusters found at the beginning and end of extents so
1146 * if we're tracking a partial cluster and the current extent
1149 if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
1151 rc->partial = false;
1161 rc->partial = false;
1177 * start tracking a partial cluster if there's a partial a
[all...]
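
The rsvd_count machinery counts clusters to release while ensuring a cluster split across extent boundaries is counted at most once. A toy version of that bookkeeping (B2C and the 4-blocks-per-cluster geometry stand in for EXT4_B2C and the real cluster ratio; a real walk would also close out the last tracked cluster at the end):

    #include <stdbool.h>

    #define BLOCKS_PER_CLUSTER 4
    #define B2C(b) ((b) / BLOCKS_PER_CLUSTER)  /* stand-in for EXT4_B2C */

    struct rsvd_count {
        unsigned long lclu;      /* cluster currently being tracked */
        bool partial;            /* mid-cluster run still open? */
        unsigned int ndelayed;
    };

    /* Visit one delayed block; a cluster is counted once even if
     * several of its blocks are visited. */
    static void visit_block(struct rsvd_count *rc, unsigned long lblk)
    {
        if (rc->partial && rc->lclu != B2C(lblk)) {
            rc->ndelayed++;      /* previous cluster is done */
            rc->partial = false;
        }
        if (!rc->partial) {
            rc->lclu = B2C(lblk);
            rc->partial = true;
        }
    }
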
/linux-master/tools/testing/kunit/
kunit_kernel.py
181 partial = kunit_config.parse_file(path)
182 diff = merged.conflicting_options(partial)
186 merged.merge_in_entries(partial)
/linux-master/mm/
slub.c
7 * and only uses a centralized lock to manage a pool of partial slabs.
88 * CPU partial slabs
90 * The partially empty slabs cached on the CPU partial list are used
94 * partial list. Please see __slab_free() for more details.
97 * - node partial slab: PG_Workingset && !frozen
98 * - cpu partial slab: !PG_Workingset && !frozen
104 * The list_lock protects the partial and full list on each node and
105 * the partial slab counter. If taken then no new slabs may be added or
106 * removed from the lists nor make the number of partial slabs be modified.
111 * much as possible. As long as SLUB does not have to handle partial
394 struct slab *partial; /* Partially allocated slabs */ (member in struct kmem_cache_cpu)
428 struct list_head partial; (member in struct kmem_cache_node)
2573 struct slab *slab, *slab2, *partial = NULL; (local)
6216 SLAB_ATTR_RO(partial); (variable)
[all...]
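
The comments spell out the locking rule these hits obey: every walk or modification of a node's partial list happens under that node's list_lock. In kernel terms the pattern looks roughly like this (field names follow the struct kmem_cache_node hit above; the walk itself is a sketch, not a quote of slub.c):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct kmem_cache_node_sketch {
        spinlock_t list_lock;     /* protects 'partial' and nr_partial */
        unsigned long nr_partial;
        struct list_head partial; /* partially filled slabs */
    };

    /* Count partial slabs; the lock keeps the list stable while we walk. */
    static unsigned long count_partial_sketch(struct kmem_cache_node_sketch *n)
    {
        unsigned long flags, x = 0;
        struct list_head *pos;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each(pos, &n->partial)
            x++;
        spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
    }
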
/linux-master/crypto/asymmetric_keys/
asymmetric_type.c
42 * @partial: Use partial match for @id_0 and @id_1 if true, exact if false.
55 bool partial)
84 } else if (partial) {
272 * Match asymmetric keys by a partial match on one of the first two IDs.
303 * "id:<id>" - find a key by partial match on one of the first two IDs
51 find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, const struct asymmetric_key_id *id_2, bool partial) (argument)
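
The signature above is the whole story: pass up to three candidate IDs and choose exact or partial matching with the final flag. A hedged call-site sketch (assuming the declaration lives in keys/asymmetric-type.h; the keyring and ID here are hypothetical):

    #include <linux/key.h>
    #include <keys/asymmetric-type.h>

    /* Hypothetical lookup: find a key by a partial match on its first
     * or second ID (e.g. a certificate's subjectKeyId prefix). */
    static struct key *lookup_signer(struct key *trusted_keys,
                                     struct asymmetric_key_id *skid)
    {
        /* id_2 unused here; 'true' requests partial matching. */
        return find_asymmetric_key(trusted_keys, skid, NULL, NULL, true);
    }
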
/linux-master/tools/cgroup/
memcg_slabinfo.py
73 for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_mman.c
208 view.partial.offset = rounddown(page_offset, chunk);
209 view.partial.size =
211 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
213 /* If the partial covers the entire object, just create a normal VMA. */
344 /* Use a partial view if it is bigger than available space */
407 area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
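
Lines 208-211 are the core computation: snap the faulting page down to a chunk boundary and clamp the view so it never runs past the object. As a stand-alone calculation (CHUNK and the sizes here are made-up values):

    #include <stdio.h>

    #define CHUNK 64  /* pages per partial view; hypothetical */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define ROUNDDOWN(x, y) ((x) / (y) * (y))

    int main(void)
    {
        unsigned long obj_pages = 200, page_offset = 150;
        unsigned long off = ROUNDDOWN(page_offset, CHUNK);           /* 128 */
        unsigned long size = MIN((unsigned long)CHUNK, obj_pages - off);

        /* If the view covers everything, a normal (full) VMA is used. */
        printf("partial view: offset=%lu size=%lu full=%d\n",
               off, size, off == 0 && size == obj_pages);
        return 0;
    }
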
/linux-master/fs/afs/
flock.c
458 bool partial, no_server_lock = false; (local)
472 partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
474 if (mode == afs_flock_mode_write && partial)
484 * method of upgrade/downgrade, so we need to emulate for partial-file
488 * keeps partial-file locks local. Allow this behaviour to be emulated
492 (partial && mode == afs_flock_mode_openafs)) {
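
A lock is "partial" unless it spans the entire file, which the code detects as fl_start == 0 && fl_end == OFFSET_MAX. The test, isolated (OFFSET_MAX stands in for the kernel's loff_t maximum):

    #include <stdbool.h>
    #include <limits.h>

    #define OFFSET_MAX LLONG_MAX   /* stand-in for the kernel's value */

    struct file_lock_range {
        long long start, end;
    };

    /* Whole-file locks can map onto AFS server locks; anything else
     * must be handled (or emulated) locally. */
    static bool is_partial(const struct file_lock_range *fl)
    {
        return fl->start != 0 || fl->end != OFFSET_MAX;
    }
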
/linux-master/fs/
splice.c
220 buf->offset = spd->partial[page_nr].offset;
221 buf->len = spd->partial[page_nr].len;
222 buf->private = spd->partial[page_nr].private;
269 * Check if we need to grow the arrays holding pages and partial page
281 spd->partial = kmalloc_array(max_usage, sizeof(struct partial_page),
284 if (spd->pages && spd->partial)
288 kfree(spd->partial);
298 kfree(spd->partial);
752 /* dismiss the fully eaten buffers, adjust the partial one */
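
spd->partial is a parallel array to spd->pages: for each page it records which slice (offset/len) actually carries payload. A hedged sketch of the grow-both-arrays pattern at lines 281-298 (assuming the struct partial_page / struct splice_pipe_desc layout from include/linux/splice.h; this is a sketch, not splice_grow_spd() itself):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/splice.h>

    /* Grow the parallel pages/partial arrays to 'max_usage' entries;
     * both must succeed or both are freed. */
    static int grow_spd_sketch(struct splice_pipe_desc *spd,
                               unsigned int max_usage)
    {
        spd->pages = kmalloc_array(max_usage, sizeof(struct page *),
                                   GFP_KERNEL);
        spd->partial = kmalloc_array(max_usage,
                                     sizeof(struct partial_page),
                                     GFP_KERNEL);
        if (spd->pages && spd->partial)
            return 0;

        kfree(spd->pages);
        kfree(spd->partial);
        return -ENOMEM;
    }
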
/linux-master/drivers/gpu/drm/i915/
i915_vma.c
184 view->partial.offset,
185 view->partial.size,
187 vma->size = view->partial.size;
1273 unsigned int count = view->partial.size;
1286 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1909 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;

Completed in 357 milliseconds
