Searched refs:chunks (Results 1 - 25 of 88) sorted by relevance


/linux-master/drivers/comedi/drivers/ni_routing/tools/
convert_csv_to_c.py
228 chunks = [ self.output_file_top,
244 chunks.append('\t&{},'.format(dev_table_name))
273 chunks.append('\tNULL,') # terminate list
274 chunks.append('};')
275 return '\n'.join(chunks)
416 chunks = [ self.output_file_top,
432 chunks.append('\t&{},'.format(fam_table_name))
462 chunks.append('\tNULL,') # terminate list
463 chunks.append('};')
464 return '\n'.join(chunks)
[all...]
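The Python excerpt above builds the generated C source as a list of strings and closes each table with a '\tNULL,' entry before '};', i.e. it emits a NULL-terminated array of pointers. A minimal C sketch of that shape and how a consumer can walk it without a separate count (the type and symbol names here are illustrative, not the script's actual output):

    /* Illustrative shape of the emitted table: pointers terminated by NULL. */
    struct demo_device_routes;                        /* stand-in element type */

    extern const struct demo_device_routes *const demo_routes_list[];

    /* Consumers iterate until the NULL terminator instead of carrying a count. */
    static int count_route_tables(void)
    {
        int n = 0;

        while (demo_routes_list[n])
            n++;
        return n;
    }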
/linux-master/tools/testing/selftests/drivers/net/mlxsw/spectrum/
devlink_lib_spectrum.sh
13 KVDL_CHILDREN="singles chunks large_chunks"
90 devlink_resource_size_set 32000 kvd linear chunks
99 devlink_resource_size_set 32000 kvd linear chunks
108 devlink_resource_size_set 49152 kvd linear chunks
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_cs.h
54 /* chunks */
56 struct amdgpu_cs_chunk *chunks; member in struct:amdgpu_cs_parser
amdgpu_cs.c
193 /* get chunks */
194 chunk_array_user = u64_to_user_ptr(cs->in.chunks);
202 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
204 if (!p->chunks) {
221 p->chunks[i].chunk_id = user_chunk.chunk_id;
222 p->chunks[i].length_dw = user_chunk.length_dw;
224 size = p->chunks[i].length_dw;
227 p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
229 if (p->chunks[i].kdata == NULL) {
235 if (copy_from_user(p->chunks[
[all...]
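The amdgpu_cs.c excerpt copies an array of chunk descriptors from user space and then each chunk's payload, sized by length_dw 32-bit words. A hedged user-space analogue of that two-level copy, using plain malloc/memcpy instead of the kernel's kvmalloc_array/copy_from_user and made-up types:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_chunk_desc { uint32_t chunk_id; uint32_t length_dw; const uint32_t *data; };
    struct demo_chunk      { uint32_t chunk_id; uint32_t length_dw; uint32_t *kdata; };

    /* First level: the descriptor array; second level: each chunk's payload. */
    static struct demo_chunk *copy_chunks(const struct demo_chunk_desc *descs, unsigned int n)
    {
        struct demo_chunk *chunks = calloc(n, sizeof(*chunks));

        if (!chunks)
            return NULL;
        for (unsigned int i = 0; i < n; i++) {
            size_t bytes = (size_t)descs[i].length_dw * sizeof(uint32_t);

            chunks[i].chunk_id  = descs[i].chunk_id;
            chunks[i].length_dw = descs[i].length_dw;
            chunks[i].kdata     = malloc(bytes ? bytes : 1);  /* keep malloc(0) from looking like failure */
            if (!chunks[i].kdata)
                goto fail;
            memcpy(chunks[i].kdata, descs[i].data, bytes);
        }
        return chunks;
    fail:
        for (unsigned int i = 0; i < n; i++)
            free(chunks[i].kdata);                            /* free(NULL) is a no-op */
        free(chunks);
        return NULL;
    }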
/linux-master/tools/testing/selftests/bpf/
generate_udp_fragments.py
46 chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)]
47 chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks]
/linux-master/scripts/gdb/linux/
timerlist.py
163 chunks = []
169 chunks.append(buf[start:end])
171 chunks.append(',')
175 chunks[0] = chunks[0][0] # Cut off the first 0
177 return "".join(str(chunks))
/linux-master/net/xdp/
xdp_umem.c
162 u64 chunks, npgs; local
197 chunks = div_u64_rem(size, chunk_size, &chunks_rem);
198 if (!chunks || chunks > U32_MAX)
213 umem->chunks = chunks;
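The xdp_umem.c excerpt divides the UMEM size by the chunk size and rejects a count of zero or one that will not fit the 32-bit umem->chunks field. A standalone sketch of that validation (the kernel additionally treats an unaligned tail specially when unaligned chunks are allowed):

    #include <stdint.h>
    #include <errno.h>

    static int validate_chunks(uint64_t size, uint64_t chunk_size, uint32_t *out)
    {
        uint64_t chunks = size / chunk_size;
        uint64_t rem    = size % chunk_size;

        if (!chunks || chunks > UINT32_MAX)
            return -EINVAL;        /* no whole chunk, or count overflows u32 */
        if (rem)
            return -EINVAL;        /* tail bytes not covered by a chunk */
        *out = (uint32_t)chunks;
        return 0;
    }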
/linux-master/drivers/net/ethernet/netronome/nfp/nfpcore/
nfp_nsp.c
505 } *chunks; local
517 chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);
518 if (!chunks)
526 chunks[i].chunk = kmalloc(chunk_size,
528 if (!chunks[i].chunk)
531 chunks[i].len = min_t(u64, chunk_size, max_size - off);
536 memcpy(chunks[i].chunk, arg->in_buf + off, coff);
538 memset(chunks[i].chunk + coff, 0, chunk_size - coff);
540 off += chunks[
[all...]
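The nfp_nsp.c excerpt splits an input buffer across nseg fixed-size chunk buffers, copying whatever input remains and zero-filling the rest of the final chunk. A hedged standalone sketch of that pattern with simplified types (it assumes nseg was computed so the chunks cover max_size):

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_seg { void *chunk; size_t len; };

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    static int fill_chunks(struct demo_seg *segs, size_t nseg, size_t chunk_size,
                           const void *in_buf, size_t in_len, size_t max_size)
    {
        size_t off = 0;

        for (size_t i = 0; i < nseg; i++) {
            size_t coff = off < in_len ? min_sz(chunk_size, in_len - off) : 0;

            segs[i].chunk = malloc(chunk_size);
            if (!segs[i].chunk)
                return -1;                     /* caller frees what was already allocated */
            segs[i].len = min_sz(chunk_size, max_size - off);

            memcpy(segs[i].chunk, (const char *)in_buf + off, coff);
            memset((char *)segs[i].chunk + coff, 0, chunk_size - coff);
            off += chunk_size;
        }
        return 0;
    }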
/linux-master/drivers/gpu/drm/radeon/
radeon_cs.c
284 /* get chunks */
296 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
303 p->chunks = kvcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
304 if (p->chunks == NULL) {
317 p->chunks[i].length_dw = user_chunk.length_dw;
319 p->chunk_relocs = &p->chunks[i];
322 p->chunk_ib = &p->chunks[i];
324 if (p->chunks[i].length_dw == 0)
328 p->chunk_const_ib = &p->chunks[i];
330 if (p->chunks[
[all...]
/linux-master/drivers/infiniband/hw/usnic/
usnic_vnic.c
44 struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member in struct:usnic_vnic
117 for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {
118 chunk = &vnic->chunks[i];
222 return vnic->chunks[type].cnt;
228 return vnic->chunks[type].free_cnt;
254 src = &vnic->chunks[type];
286 vnic->chunks[res->type].free_cnt++;
382 &vnic->chunks[res_type]);
391 usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);
427 usnic_vnic_free_res_chunk(&vnic->chunks[res_typ
[all...]
/linux-master/mm/
zbud.c
31 * zbud pages are divided into "chunks". The size of the chunks is fixed at
33 * into chunks allows organizing unbuddied zbud pages into a manageable number
34 * of unbuddied lists according to the number of free chunks available in the
63 * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
65 * 63 which shows the max number of free chunks in zbud page, also there will be
108 * @first_chunks: the size of the first buddy in chunks, 0 if free
109 * @last_chunks: the size of the last buddy in chunks, 0 if free
126 /* Converts an allocation size in bytes to size in zbud chunks */
180 /* Returns the number of free chunks i
249 int chunks, i, freechunks; local
[all...]
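The zbud.c comments fix the allocation granularity at PAGE_SIZE/64, with one chunk taken by the page header, leaving 63 usable chunks per page. A small sketch of the rounding that implies, assuming 4 KiB pages (constant names here are stand-ins, not zbud's):

    #include <stddef.h>

    #define DEMO_PAGE_SHIFT   12                          /* 4096-byte pages assumed */
    #define DEMO_CHUNK_SHIFT  (DEMO_PAGE_SHIFT - 6)       /* 64 chunks per page */
    #define DEMO_CHUNK_SIZE   (1UL << DEMO_CHUNK_SHIFT)   /* 64 bytes per chunk */

    /* Round an allocation size up to whole chunks. */
    static size_t size_to_chunks(size_t size)
    {
        return (size + DEMO_CHUNK_SIZE - 1) >> DEMO_CHUNK_SHIFT;
    }

    /* e.g. size_to_chunks(100) == 2, so a 100-byte buddy occupies 2 * 64 bytes;
     * unbuddied pages are then binned by how many of the 63 chunks remain free. */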
z3fold.c
18 * As in zbud, pages are divided into "chunks". The size of the chunks is
48 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
51 * which shows the max number of free chunks in z3fold page, also there will
92 * struct z3fold_header - z3fold page metadata occupying first chunks of each
102 * @first_chunks: the size of the first buddy in chunks, 0 if free
103 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
104 * @last_chunks: the size of the last buddy in chunks, 0 if free
185 /* Converts an allocation size in bytes to size in z3fold chunks */
557 get_free_buddy(struct z3fold_header *zhdr, int chunks) argument
630 short chunks = size_to_chunks(sz); local
792 int chunks = size_to_chunks(size), i; local
1004 int chunks = size_to_chunks(size); local
[all...]
/linux-master/lib/
genalloc.c
160 INIT_LIST_HEAD(&pool->chunks);
203 list_add_rcu(&chunk->next_chunk, &pool->chunks);
223 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
249 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
297 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
503 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
538 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
561 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
586 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
605 list_for_each_entry_rcu(chunk, &pool->chunks, next_chun
[all...]
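The genalloc.c excerpts walk pool->chunks under RCU; each chunk is a region contributed by gen_pool_add(). A hedged usage sketch of the gen_pool API that feeds that list (kernel-side code, shown only to illustrate where the chunks come from):

    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/errno.h>

    static int demo_pool_setup(unsigned long region_start, size_t region_size)
    {
        struct gen_pool *pool;
        unsigned long addr;

        pool = gen_pool_create(ilog2(256), -1);     /* 256-byte allocation granules */
        if (!pool)
            return -ENOMEM;
        if (gen_pool_add(pool, region_start, region_size, -1)) {   /* adds one chunk */
            gen_pool_destroy(pool);
            return -ENOMEM;
        }
        addr = gen_pool_alloc(pool, 1024);          /* carve 1 KiB out of that chunk */
        if (addr)
            gen_pool_free(pool, addr, 1024);
        gen_pool_destroy(pool);
        return 0;
    }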
bitmap-str.c
466 * Commas group hex digits into chunks. Each chunk defines exactly 32
477 int chunks = BITS_TO_U32(nmaskbits); local
487 if (!chunks--)
499 unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
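The bitmap-str.c comment describes the kernel's bitmap string format: comma-separated hex groups, each group being exactly one 32-bit chunk, most-significant chunk first (e.g. "f,ffffffff" sets bits 0-35). A small parsing sketch, limited to 64-bit masks to keep it short:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_mask64(const char *s, uint64_t *out)
    {
        uint64_t mask = 0;
        unsigned int chunk;
        int n = 0;

        while (sscanf(s, "%x", &chunk) == 1) {
            mask = (mask << 32) | chunk;       /* earlier chunks are more significant */
            n++;
            s = strchr(s, ',');
            if (!s)
                break;
            s++;                               /* step past the comma */
        }
        *out = mask;
        return n;                              /* number of 32-bit chunks parsed */
    }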
/linux-master/net/sctp/
chunk.c
43 INIT_LIST_HEAD(&msg->chunks);
65 list_for_each_entry(chunk, &msg->chunks, frag_list)
81 list_for_each_safe(pos, temp, &msg->chunks) {
140 * down any such message into smaller chunks. Opportunistically, fragment
141 * the chunks down to the current MTU constraints. We may get refragmented
182 /* If the peer requested that we authenticate DATA chunks
183 * we need to account for bundling of the AUTH chunks along with
235 /* Create chunks for all DATA chunks. */
280 list_add_tail(&chunk->frag_list, &msg->chunks);
[all...]
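The sctp chunk.c comments describe breaking a large user message into DATA chunks no bigger than the current MTU allows. A sketch of just the size arithmetic, with max_data standing in for the per-chunk payload limit derived from the MTU minus headers:

    #include <stddef.h>

    struct demo_frag_plan {
        size_t whole;    /* number of full-size fragments */
        size_t last;     /* payload of the final shorter fragment, 0 if none */
    };

    static struct demo_frag_plan plan_fragments(size_t msg_len, size_t max_data)
    {
        struct demo_frag_plan p;

        p.whole = msg_len / max_data;
        p.last  = msg_len % max_data;
        /* e.g. msg_len = 3000, max_data = 1200 -> two 1200-byte DATA chunks plus
         * one 600-byte chunk, each added to the msg->chunks list. */
        return p;
    }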
/linux-master/drivers/net/ethernet/intel/idpf/
idpf_virtchnl.c
1007 struct virtchnl2_vector_chunks *chunks; local
1012 chunks = &vport->adapter->req_vec_chunks->vchunks;
1013 num_vchunks = le16_to_cpu(chunks->num_vchunks);
1020 chunk = &chunks->vchunks[j];
1049 * @chunks: queue regs received over mailbox
1057 struct virtchnl2_queue_reg_chunks *chunks)
1059 u16 num_chunks = le16_to_cpu(chunks->num_chunks);
1067 chunk = &chunks->chunks[num_chunks];
1148 struct virtchnl2_queue_reg_chunks *chunks; local
1056 idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, struct virtchnl2_queue_reg_chunks *chunks) argument
2020 struct virtchnl2_queue_reg_chunks *chunks; local
3156 idpf_get_vec_ids(struct idpf_adapter *adapter, u16 *vecids, int num_vecids, struct virtchnl2_vector_chunks *chunks) argument
3199 idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, struct virtchnl2_queue_reg_chunks *chunks) argument
3318 struct virtchnl2_queue_reg_chunks *chunks; local
[all...]
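The idpf helpers above (idpf_vport_get_queue_ids and friends) walk virtchnl2 chunk arrays in which each chunk describes a contiguous run of queues, and flatten the runs into plain id arrays. A generic sketch with simplified field names:

    #include <stdint.h>
    #include <stddef.h>

    struct demo_q_chunk { uint32_t start_queue_id; uint32_t num_queues; };

    static size_t flatten_queue_ids(uint32_t *qids, size_t max_qids,
                                    const struct demo_q_chunk *chunks, size_t num_chunks)
    {
        size_t k = 0;

        for (size_t i = 0; i < num_chunks; i++)
            for (uint32_t j = 0; j < chunks[i].num_queues && k < max_qids; j++)
                qids[k++] = chunks[i].start_queue_id + j;
        return k;                     /* ids actually written */
    }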
idpf_virtchnl.h
51 struct virtchnl2_vector_chunks *chunks);
virtchnl2.h
549 * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
551 * @num_chunks: Number of chunks.
553 * @chunks: Chunks of queue info.
558 struct virtchnl2_queue_reg_chunk chunks[]; member in struct:virtchnl2_queue_reg_chunks
589 * @chunks: Chunks of contiguous queues.
594 * necessary fields followed by chunks which in turn will have an array of
622 struct virtchnl2_queue_reg_chunks chunks; member in struct:virtchnl2_create_vport
797 * @chunks: Chunks of contiguous queues.
814 struct virtchnl2_queue_reg_chunks chunks; member in struct:virtchnl2_add_queues
858 * struct virtchnl2_vector_chunks - chunks o
1134 struct virtchnl2_queue_chunk chunks[]; member in struct:virtchnl2_queue_chunks
1155 struct virtchnl2_queue_chunks chunks; member in struct:virtchnl2_del_ena_dis_queues
[all...]
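The virtchnl2.h structures above end in flexible array members (e.g. chunks[] sized by num_chunks), so each message is a fixed header followed by a variable number of chunk entries. A sizing sketch with stand-in types (kernel code typically uses the struct_size() helper for this arithmetic):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct demo_reg_chunk  { uint32_t start_queue_id; uint32_t num_queues; };
    struct demo_reg_chunks {
        uint16_t num_chunks;
        struct demo_reg_chunk chunks[];          /* flexible array member */
    };

    static struct demo_reg_chunks *alloc_chunks_msg(uint16_t n)
    {
        size_t bytes = sizeof(struct demo_reg_chunks) +
                       (size_t)n * sizeof(struct demo_reg_chunk);
        struct demo_reg_chunks *msg = calloc(1, bytes);

        if (msg)
            msg->num_chunks = n;
        return msg;
    }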
/linux-master/drivers/net/wireless/intel/iwlwifi/pcie/
ctxt-info-gen3.c
296 len0 = pnvm_data->chunks[0].len;
297 len1 = pnvm_data->chunks[1].len;
312 memcpy(dram->block, pnvm_data->chunks[0].data, len0);
313 memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);
346 len = pnvm_data->chunks[i].len;
347 data = pnvm_data->chunks[i].data;
/linux-master/include/linux/
shdma-base.h
52 int chunks; member in struct:shdma_desc
/linux-master/kernel/
audit_tree.c
17 struct list_head chunks; member in struct:audit_tree
68 * tree.chunks anchors chunk.owners[].list hash_lock
101 INIT_LIST_HEAD(&tree->chunks);
435 list_add(&chunk->owners[0].list, &tree->chunks);
507 list_add(&p->list, &tree->chunks);
565 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
566 * chunks. The function expects tagged chunks are all at the beginning of the
567 * chunks list.
572 while (!list_empty(&victim->chunks)) {
[all...]
/linux-master/drivers/net/wireless/ti/wlcore/
boot.c
237 u32 chunks, addr, len; local
242 chunks = be32_to_cpup((__be32 *) fw);
245 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
247 while (chunks--) {
258 chunks, addr, len);
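The wlcore boot.c excerpt reads a big-endian chunk count from the start of the firmware image and then uploads each chunk in turn. A hedged sketch of walking such a blob, assuming (from the excerpt, not a documented format) that each chunk is a big-endian address and length followed by the payload:

    #include <stdint.h>

    static uint32_t read_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static const uint8_t *walk_fw_chunks(const uint8_t *fw,
                                         void (*upload)(uint32_t addr, const uint8_t *data, uint32_t len))
    {
        uint32_t chunks = read_be32(fw);          /* firmware chunks to be uploaded */
        const uint8_t *p = fw + 4;

        while (chunks--) {
            uint32_t addr = read_be32(p);
            uint32_t len  = read_be32(p + 4);

            upload(addr, p + 8, len);             /* push this chunk to the device */
            p += 8 + len;
        }
        return p;                                 /* first byte past the last chunk */
    }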
/linux-master/drivers/virt/vboxguest/
vboxguest_core.c
361 u32 i, chunks; local
376 * The host always returns the same maximum amount of chunks, so
389 chunks = req->balloon_chunks;
390 if (chunks > gdev->mem_balloon.max_chunks) {
392 __func__, chunks, gdev->mem_balloon.max_chunks);
396 if (chunks > gdev->mem_balloon.chunks) {
398 for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
403 gdev->mem_balloon.chunks
[all...]
/linux-master/drivers/infiniband/hw/efa/
efa_verbs.c
113 struct pbl_chunk *chunks; member in struct:pbl_chunk_list
1284 /* allocate a chunk list that consists of 4KB chunks */
1288 chunk_list->chunks = kcalloc(chunk_list_size,
1289 sizeof(*chunk_list->chunks),
1291 if (!chunk_list->chunks)
1300 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1301 if (!chunk_list->chunks[i].buf)
1304 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1306 chunk_list->chunks[chunk_list_size - 1].length =
1310 /* fill the dma addresses of sg list pages to chunks
[all...]
/linux-master/drivers/md/
md-bitmap.c
811 unsigned long chunks, int with_super,
818 bytes = DIV_ROUND_UP(chunks, 8);
1111 unsigned long chunks = bitmap->counts.chunks; local
1123 for (i = 0; i < chunks ; i++) {
1193 for (i = 0; i < chunks; i++) {
1219 bit_cnt, chunks);
1354 for (j = 0; j < counts->chunks; j++) {
1666 /* Sync has finished, and any bitmap chunks that weren't synced
1758 /* dirty the memory and file bits for bitmap chunks "
810 md_bitmap_storage_alloc(struct bitmap_storage *store, unsigned long chunks, int with_super, int slot_number) argument
2139 unsigned long chunks; local
[all...]
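The md-bitmap.c excerpt sizes the on-disk bitmap as one bit per chunk, rounded up to bytes (DIV_ROUND_UP(chunks, 8)) and then to pages, with the first page optionally holding a superblock. A sizing sketch assuming 4 KiB pages and a 256-byte superblock (both are assumptions of this example):

    #include <stddef.h>

    #define DEMO_PAGE_SIZE  4096u
    #define DEMO_SB_BYTES    256u

    static size_t bitmap_file_pages(size_t chunks, int with_super)
    {
        size_t bytes = (chunks + 7) / 8;           /* one bit per chunk */

        if (with_super)
            bytes += DEMO_SB_BYTES;                /* room for the bitmap superblock */
        return (bytes + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
    }

    /* e.g. 1,000,000 chunks -> 125,000 bitmap bytes -> 31 pages (with or without super). */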

