Searched refs:layers (Results 1 - 25 of 81) sorted by last modified time

/linux-master/drivers/gpu/drm/xlnx/
zynqmp_kms.c
80 struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
98 struct zynqmp_disp_layer *layer = dpsub->layers[plane->index];
149 struct zynqmp_disp_layer *layer = dpsub->layers[i];
zynqmp_dp.c
1285 * layers are connected.
1291 return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_VID];
1293 return dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX];
1318 if (layer == dp->dpsub->layers[ZYNQMP_DPSUB_LAYER_GFX])
zynqmp_disp.c
60 * layers, and a CRTC for the Video Rendering Pipeline.
147 * @layers: Layers (planes)
163 struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS]; member in struct:zynqmp_disp
969 * NOTE: This function doesn't make sense for live video layers and will
1009 * NOTE: This function should be used only for live video input layers.
1079 * live video layers.
1222 * zynqmp_disp_destroy_layers - Destroy all layers
1229 for (i = 0; i < ARRAY_SIZE(disp->layers); i++)
1230 zynqmp_disp_layer_release_dma(disp, &disp->layers[i]);
1268 * zynqmp_disp_create_layers - Create and initialize all layers
[all...]
/linux-master/drivers/gpu/drm/arm/display/komeda/
komeda_pipeline.c
81 pos = to_cpos(pipe->layers[id - KOMEDA_COMPONENT_LAYER0]);
301 if (left->layer_type == pipe->layers[i]->layer_type)
302 return pipe->layers[i];
319 layer = pipe->layers[i];
/linux-master/fs/overlayfs/
super.c
399 * file handles, so they require that all layers support them.
488 pr_err("upper fs is r/o, try multi-lower layers mount\n");
912 * as all lower layers with null uuid are on the same fs.
973 * The fsid after the last lower fsid is used for the data layers.
983 struct ovl_fs_context *ctx, struct ovl_layer *layers)
995 * and the last fsid is reserved for "null fs" of the data layers.
1000 * All lower layers that share the same fs as upper layer, use the same
1031 * Check if lower root conflicts with this overlay layers before
1058 * Make lower layers R/O. That way fchmod/fchown on lower file
1063 layers[of
982 ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, struct ovl_fs_context *ctx, struct ovl_layer *layers) argument
1110 ovl_get_lowerstack(struct super_block *sb, struct ovl_fs_context *ctx, struct ovl_fs *ofs, struct ovl_layer *layers) argument
1298 struct ovl_layer *layers; local
[all...]
params.c
206 /* count layers, not colons */
308 return invalfc(fc, "regular lower layers cannot follow data layers");
427 * Set "/lower1", "/lower2", and "/lower3" as lower layers and
428 * "/data1" and "/data2" as data lower layers. Any existing lower
429 * layers are replaced.
445 /* drop all existing lower layers */
517 * there are no data layers.
520 pr_err("regular lower layers cannot follow data lower layers");
[all...]
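The params.c comments above describe the lowerdir option: regular lower layers come first, data-only lower layers follow, and re-specifying lowerdir replaces any layers set so far. A minimal userspace sketch of such a mount, assuming the "::" separator for data-only layers and purely illustrative paths (/lower1, /data1, /merged, ...):

/* Hedged sketch: mount an overlay with three regular lower layers and
 * two data-only lower layers, mirroring the example in the params.c
 * comment above. All paths are illustrative and must already exist;
 * the "::" separator marking data-only layers and the need for
 * metacopy=on to reach data-only layers are assumptions about the
 * syntax this parser handles, not quoted from the excerpt.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts =
		"lowerdir=/lower1:/lower2:/lower3::/data1::/data2,"
		"upperdir=/upper,workdir=/work,metacopy=on";

	if (mount("overlay", "/merged", "overlay", 0, opts)) {
		perror("mount");
		return 1;
	}
	return 0;
}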
util.c
479 * for the layers where xwhiteouts marker was found in that merge dir.
488 ofs->layers[layer->idx].has_xwhiteouts = true;
ovl_entry.h
60 /* Number of unique fs among layers including upper fs */
62 /* Number of data-only lower layers */
64 struct ovl_layer *layers; member in struct:ovl_fs
95 /* Number of lower layers, not including data-only layers */
103 return ofs->layers[0].mnt;
namei.c
51 * that will stop further lookup in lower layers (d->stop=true)
54 * layers (reset d->stop).
285 /* Caught in a trap of overlapping layers */
397 /* Lookup in data-only layers by absolute redirect to layer root */
407 layer = &ofs->layers[ofs->numlayer - ofs->numdatalayer];
432 if (ofs->layers[i].fsid &&
433 ofs->layers[i].fs->bad_uuid)
436 origin = ovl_decode_real_fh(ofs, fh, ofs->layers[i].mnt,
459 .layer = &ofs->layers[i]
881 *layer = &OVL_FS(dentry->d_sb)->layers[
[all...]
export.c
61 * entry /a in the lower layers above layer N and find the indexed dir /a from
63 * will need to verify there are no redirects in lower layers above N. In the
173 * possible when there are redirects in lower layers and non-indexed merge dirs.
463 this = ovl_lookup_real(sb, upper, &ofs->layers[0]);
643 const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer;
/linux-master/drivers/net/ethernet/intel/ice/devlink/
devlink.c
529 * @layers: value read from flash will be saved here
535 static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers) argument
551 *layers = ICE_SCHED_5_LAYERS;
553 *layers = ICE_SCHED_9_LAYERS;
564 * @layers: value to be saved in flash
566 * Variable "layers" defines user's preference about number of layers in Tx
572 static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers) argument
587 if (layers == ICE_SCHED_5_LAYERS)
643 "Tx scheduling layers hav
[all...]
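The devlink.c comments above describe a user-selectable number of Tx scheduler layers that is read from and written back to flash, with only two valid values visible in the excerpt (5 or 9). A small hedged sketch of the validation step such an update path implies, reusing the ICE_SCHED_5_LAYERS/ICE_SCHED_9_LAYERS constants from the excerpt; the helper name and error text are illustrative, not the driver's actual code:

/* Hedged sketch: reject anything other than the two layer counts the
 * excerpt above shows the driver writing to flash. Helper name and
 * extack message are illustrative.
 */
#include <linux/errno.h>
#include <linux/netlink.h>

static int example_check_tx_topo_layers(int layers,
					struct netlink_ext_ack *extack)
{
	if (layers != ICE_SCHED_5_LAYERS && layers != ICE_SCHED_9_LAYERS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 5 or 9 Tx scheduling layers are supported");
		return -EINVAL;
	}
	return 0;
}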
/linux-master/drivers/edac/
xgene_edac.c
345 struct edac_mc_layer layers[2]; local
380 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
381 layers[0].size = 4;
382 layers[0].is_virt_csrow = true;
383 layers[1].type = EDAC_MC_LAYER_CHANNEL;
384 layers[1].size = 2;
385 layers[1].is_virt_csrow = false;
386 mci = edac_mc_alloc(tmp_ctx.mcu_id, ARRAY_SIZE(layers), layers,
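The xgene_edac.c lines above show the allocation pattern that recurs in every EDAC driver listed below: fill a small edac_mc_layer array describing the chip-select/channel (or channel/slot) topology, then hand it to edac_mc_alloc(). A condensed sketch of that shared pattern, with illustrative layer sizes and no driver-private data:

/* Hedged sketch of the shared EDAC allocation pattern in the drivers
 * listed here. Layer sizes (4 chip selects, 2 channels) and the zero
 * private-data size are illustrative; real drivers also include the
 * local "edac_mc.h" from drivers/edac/ for the edac_mc_alloc()
 * declaration.
 */
#include <linux/kernel.h>
#include <linux/edac.h>

static struct mem_ctl_info *example_alloc_mci(int mc_idx)
{
	struct edac_mc_layer layers[2];

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	return edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers, 0);
}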
skx_common.c
452 struct edac_mc_layer layers[2]; local
457 layers[0].type = EDAC_MC_LAYER_CHANNEL;
458 layers[0].size = NUM_CHANNELS;
459 layers[0].is_virt_csrow = false;
460 layers[1].type = EDAC_MC_LAYER_SLOT;
461 layers[1].size = NUM_DIMMS;
462 layers[1].is_virt_csrow = true;
463 mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
edac_mc_sysfs.c
849 edac_layer_name[mci->layers[i].type],
850 mci->layers[i].size - 1);
cpc925_edac.c
910 struct edac_mc_layer layers[2]; local
948 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
949 layers[0].size = CPC925_NR_CSROWS;
950 layers[0].is_virt_csrow = true;
951 layers[1].type = EDAC_MC_LAYER_CHANNEL;
952 layers[1].size = nr_channels;
953 layers[1].is_virt_csrow = false;
954 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
armada_xp_edac.c
288 struct edac_mc_layer layers[1]; local
306 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
307 layers[0].size = SDRAM_NUM_CS;
308 layers[0].is_virt_csrow = true;
310 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
altera_edac.c
283 struct edac_mc_layer layers[2]; local
354 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
355 layers[0].size = 1;
356 layers[0].is_virt_csrow = true;
357 layers[1].type = EDAC_MC_LAYER_CHANNEL;
358 layers[1].size = 1;
359 layers[1].is_virt_csrow = false;
360 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
synopsys_edac.c
1346 struct edac_mc_layer layers[2]; local
1365 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1366 layers[0].size = SYNPS_EDAC_NR_CSROWS;
1367 layers[0].is_virt_csrow = true;
1368 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1369 layers[1].size = SYNPS_EDAC_NR_CHANS;
1370 layers[1].is_virt_csrow = false;
1372 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
versal_edac.c
1079 struct edac_mc_layer layers[2]; local
1109 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1110 layers[0].size = num_csrows;
1111 layers[0].is_virt_csrow = true;
1112 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1113 layers[1].size = num_chans;
1114 layers[1].is_virt_csrow = false;
1116 mci = edac_mc_alloc(edac_mc_id, ARRAY_SIZE(layers), layers,
igen6_edac.c
1167 struct edac_mc_layer layers[2]; local
1182 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1183 layers[0].size = NUM_CHANNELS;
1184 layers[0].is_virt_csrow = false;
1185 layers[1].type = EDAC_MC_LAYER_SLOT;
1186 layers[1].size = NUM_DIMMS;
1187 layers[1].is_virt_csrow = true;
1189 mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers, 0);
amd64_edac.c
3936 * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers
3937 * should be swapped to fit into the layers.
3954 struct edac_mc_layer layers[2]; local
3957 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3958 layers[0].size = get_layer_size(pvt, 0);
3959 layers[0].is_virt_csrow = true;
3960 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3961 layers[1].size = get_layer_size(pvt, 1);
3962 layers[1].is_virt_csrow = false;
3964 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layer
[all...]
/linux-master/arch/parisc/kernel/
drivers.c
1042 pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
1043 mod_path.layers[0], mod_path.layers[1], mod_path.layers[2],
1044 mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]);
firmware.c
1386 PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
1414 PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers),
/linux-master/include/linux/
edac.h
373 * Maximum number of layers used by the memory controller to uniquely
377 * some code there that are optimized for 3 layers.
554 struct edac_mc_layer *layers; member in struct:mem_ctl_info
632 * For 2 layers, this function is similar to allocating a two-dimensional
635 * For 3 layers, this function is similar to allocating a tri-dimensional
651 index = index * mci->layers[1].size + layer1;
654 index = index * mci->layers[2].size + layer2;
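The edac.h excerpt above explains that the per-layer coordinates are folded into a single flat index one layer at a time (index = index * layers[n].size + layer_n), so the DIMM array behaves like a flattened two- or three-dimensional array. A standalone sketch of that arithmetic with illustrative layer sizes:

/* Hedged sketch of the index computation described in the edac.h
 * excerpt: a DIMM at (layer0, layer1, layer2) maps into a flat array
 * by folding in the size of each inner layer. Sizes are illustrative.
 */
#include <stdio.h>

static unsigned int flat_index(unsigned int layer0, unsigned int layer1,
			       unsigned int layer2,
			       unsigned int size1, unsigned int size2)
{
	unsigned int index = layer0;

	index = index * size1 + layer1;	/* fold in the second layer */
	index = index * size2 + layer2;	/* fold in the third layer */
	return index;
}

int main(void)
{
	/* e.g. 4 slots per csrow, 2 channels per slot: (1, 2, 1) -> 13 */
	printf("%u\n", flat_index(1, 2, 1, 4, 2));
	return 0;
}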
/linux-master/include/uapi/drm/
pvr_drm.h
912 /** @tpc_stride: [IN] Stride between layers in TPC, in pages */
974 /** @layers: [IN] Number of layers. */
975 __u32 layers; member in struct:drm_pvr_ioctl_create_hwrt_dataset_args

Completed in 298 milliseconds
