Searched refs:bulk (Results 1 - 25 of 81) sorted by last modified time


/linux-master/net/sched/
sch_generic.c
291 goto bulk;
295 bulk:
/linux-master/net/core/
page_pool.c
484 const int bulk = PP_ALLOC_CACHE_REFILL; local
490 /* Don't support bulk alloc for high-order pages */
499 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
501 nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
790 /* Approved for bulk recycling in ptr_ring cache */
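The page_pool.c hit refills its allocation cache with one batched call rather than a page-at-a-time loop. A minimal sketch of that pattern, assuming a hypothetical my_refill() and an illustrative batch size (the real code uses PP_ALLOC_CACHE_REFILL and the pool's own cache array):

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/string.h>

	#define MY_BULK 64	/* illustrative; cf. PP_ALLOC_CACHE_REFILL */

	static struct page *my_cache[MY_BULK];

	static int my_refill(int nid, gfp_t gfp)
	{
		unsigned long nr;

		/* The bulk allocator only fills NULL slots, so clear the array. */
		memset(my_cache, 0, sizeof(my_cache));
		nr = alloc_pages_bulk_array_node(gfp, nid, MY_BULK, my_cache);

		return nr ? 0 : -ENOMEM;	/* a partial fill is still usable */
	}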
/linux-master/drivers/scsi/
ppa.c
532 * The bulk flag enables some optimisations in the data transfer loops,
552 int fast, bulk, status; local
555 bulk = ((v == READ_6) ||
601 fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ?
imm.c
628 * The bulk flag enables some optimisations in the data transfer loops,
648 int fast, bulk, status; local
651 bulk = ((v == READ_6) ||
684 fast = bulk && scsi_pointer->this_residual >=
688 fast = bulk && scsi_pointer->this_residual >=
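In both parport SCSI drivers the bulk flag marks data-moving commands so the transfer loop can take the burst fast path. A hedged reconstruction of the predicate; only the READ_6 term is visible in the hits, the remaining opcodes are an assumption:

	#include <linux/types.h>
	#include <scsi/scsi_proto.h>

	/* Assumed opcode list; the hits above only show the READ_6 term. */
	static bool cmd_is_bulk(u8 v)
	{
		return v == READ_6 || v == WRITE_6 ||
		       v == READ_10 || v == WRITE_10;
	}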
/linux-master/drivers/regulator/
core.c
2782 * this for bulk operations so that the regulators can ramp
4954 struct regulator_bulk_data *bulk = data; local
4956 bulk->ret = regulator_enable(bulk->consumer);
/linux-master/drivers/bluetooth/
hci_qca.c
2248 struct regulator_bulk_data *bulk; local
2252 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
2253 if (!bulk)
2257 bulk[i].supply = vregs[i].name;
2259 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
2264 ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
2269 qca->vreg_bulk = bulk;
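The hci_qca.c hit (and the qcom_wcnss.c one below) is the standard regulator-bulk consumer pattern: allocate one regulator_bulk_data array, name each supply, resolve them all with a single devm call, then tune each consumer individually. A minimal sketch with hypothetical supply names and load:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>
	#include <linux/slab.h>

	static int my_get_vregs(struct device *dev, struct regulator_bulk_data **out)
	{
		static const char * const names[] = { "vddio", "vddch0" };	/* hypothetical */
		struct regulator_bulk_data *bulk;
		int i, ret;

		bulk = devm_kcalloc(dev, ARRAY_SIZE(names), sizeof(*bulk), GFP_KERNEL);
		if (!bulk)
			return -ENOMEM;

		for (i = 0; i < ARRAY_SIZE(names); i++)
			bulk[i].supply = names[i];

		/* One call resolves every supply and unwinds on driver detach. */
		ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(names), bulk);
		if (ret)
			return ret;

		/* Per-consumer tuning still goes through each ->consumer. */
		for (i = 0; i < ARRAY_SIZE(names); i++) {
			ret = regulator_set_load(bulk[i].consumer, 10000 /* uA, illustrative */);
			if (ret)
				return ret;
		}

		*out = bulk;
		return 0;
	}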
/linux-master/drivers/staging/vc04_services/vchiq-mmal/
mmal-vchiq.c
137 /* actual buffer used to store bulk reply */
149 } bulk; /* bulk data */ member in union:mmal_msg_context::__anon258
177 /* ordered workqueue to process all bulk operations */
253 container_of(work, struct mmal_msg_context, u.bulk.work);
254 struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
262 buffer->length = msg_context->u.bulk.buffer_used;
263 buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
264 buffer->dts = msg_context->u.bulk.dts;
265 buffer->pts = msg_context->u.bulk
[all...]
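The mmal-vchiq.c hits use the familiar work-item-inside-context pattern: the handler queued on the ordered bulk workqueue recovers its message context from the embedded work_struct. A generic sketch of that recovery; the struct here is an illustrative stand-in for mmal_msg_context:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_msg_context {
		struct {
			struct work_struct work;	/* queued per bulk completion */
			unsigned int buffer_used;
		} bulk;
	};

	static void my_bulk_work(struct work_struct *work)
	{
		struct my_msg_context *ctx =
			container_of(work, struct my_msg_context, bulk.work);

		/* ctx->bulk.* is now safely accessible on the worker thread. */
		(void)ctx->bulk.buffer_used;
	}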
/linux-master/drivers/gpu/drm/xe/
xe_bo.c
1198 struct ttm_lru_bulk_move *bulk, size_t size,
1314 if (bulk)
1315 ttm_bo_set_bulk_move(&bo->ttm, bulk);
1203 ___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, struct xe_tile *tile, struct dma_resv *resv, struct ttm_lru_bulk_move *bulk, size_t size, u16 cpu_caching, enum ttm_bo_type type, u32 flags) argument
xe_bo.h
72 struct ttm_lru_bulk_move *bulk, size_t size,
/linux-master/drivers/remoteproc/
qcom_wcnss.c
431 struct regulator_bulk_data *bulk; local
445 bulk = devm_kcalloc(wcnss->dev,
448 if (!bulk)
452 bulk[i].supply = info[i].name;
454 ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
460 regulator_set_voltage(bulk[i].consumer,
465 regulator_set_load(bulk[i].consumer, info[i].load_uA);
468 wcnss->vregs = bulk;
/linux-master/drivers/media/usb/dvb-usb/
dvb-usb.h
114 } bulk; member in union:usb_data_stream_properties::__anon377
203 * @bulk_mode: device supports bulk mode for RC (disable polling mode)
215 bool bulk_mode; /* uses bulk mode */
264 * endpoint which received control messages with bulk transfers. When this
269 * endpoint for responses to control messages sent with bulk transfers via
/linux-master/include/drm/ttm/
ttm_resource.h
237 * @first: first res in the bulk move range
238 * @last: last res in the bulk move range
240 * Range of resources for a lru bulk move.
252 * Container for the current bulk move state. Should be used with
255 * ensure that the bulk as a whole is locked for eviction even if only one BO of
256 * the bulk is evicted.
348 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
349 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
/linux-master/drivers/gpu/drm/ttm/
ttm_bo.c
84 * ttm_bo_set_bulk_move - update BOs bulk move object
87 * @bulk: bulk move structure
89 * Update the BOs bulk move object, making sure that resources are added/removed
90 * as well. A bulk move allows to move many resource on the LRU at once,
98 struct ttm_lru_bulk_move *bulk)
102 if (bo->bulk_move == bulk)
108 bo->bulk_move = bulk;
97 ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk) argument
ttm_resource.c
37 * ttm_lru_bulk_move_init - initialize a bulk move structure
38 * @bulk: the structure to init
42 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
44 memset(bulk, 0, sizeof(*bulk));
49 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
51 * @bulk: bulk move structure
56 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
62 struct ttm_lru_bulk_move_pos *pos = &bulk
41 ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk) argument
55 ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk) argument
81 ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res) argument
99 ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res) argument
113 ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res) argument
[all...]
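Taken together, the ttm_bo.c and ttm_resource.c hits cover the whole LRU bulk-move lifecycle: initialise the tracking structure once, attach BOs to it, then bump every tracked resource to the LRU tail in one pass. A sketch of that sequence; locking, and the reservation ttm_bo_set_bulk_move() requires, are elided:

	#include <drm/ttm/ttm_bo.h>
	#include <drm/ttm/ttm_resource.h>

	static void my_vm_init(struct ttm_lru_bulk_move *bulk)
	{
		ttm_lru_bulk_move_init(bulk);	/* zero the per-range bookkeeping */
	}

	static void my_vm_attach_bo(struct ttm_buffer_object *bo,
				    struct ttm_lru_bulk_move *bulk)
	{
		ttm_bo_set_bulk_move(bo, bulk);	/* bo's resources are now tracked */
	}

	static void my_vm_mark_used(struct ttm_lru_bulk_move *bulk)
	{
		/* Move the whole tracked range to the LRU tail in one pass. */
		ttm_lru_bulk_move_tail(bulk);
	}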
/linux-master/drivers/gpu/drm/msm/
msm_mdss.c
406 struct clk_bulk_data *bulk; local
413 bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
414 if (!bulk)
417 bulk[num_clocks++].id = "iface";
418 bulk[num_clocks++].id = "bus";
419 bulk[num_clocks++].id = "vsync";
421 ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
425 *clocks = bulk;
msm_drv.h
477 struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
msm_io_utils.c
17 struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count, argument
25 for (i = 0; bulk && i < count; i++) {
26 if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
27 return bulk[i].clk;
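msm_mdss.c and msm_io_utils.c show both halves of the clk_bulk pattern: fill a clk_bulk_data array with ids, fetch everything with one optional devm call, and later look an individual clock up by name. A minimal sketch of the acquisition half, reusing the three ids from the hit:

	#include <linux/clk.h>
	#include <linux/errno.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	static int my_get_clocks(struct platform_device *pdev,
				 struct clk_bulk_data **clocks)
	{
		struct clk_bulk_data *bulk;
		int num = 0, ret;

		bulk = devm_kcalloc(&pdev->dev, 3, sizeof(*bulk), GFP_KERNEL);
		if (!bulk)
			return -ENOMEM;

		bulk[num++].id = "iface";
		bulk[num++].id = "bus";
		bulk[num++].id = "vsync";

		/* _optional: clocks absent from the DT come back as NULL, not an error. */
		ret = devm_clk_bulk_get_optional(&pdev->dev, num, bulk);
		if (ret)
			return ret;

		*clocks = bulk;
		return 0;
	}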
/linux-master/drivers/infiniband/hw/mlx5/
devx.c
1534 u32 bulk = MLX5_GET(alloc_flow_counter_in, local
1538 if (bulk)
1539 bulk = 1 << bulk;
1541 bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
1544 obj->flow_counter_bulk_size = bulk;
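The devx.c hit derives the flow-counter bulk size from two firmware fields: a log2 field when non-zero, otherwise a legacy field counted in units of 128. A plain-C sketch of that arithmetic; the MLX5_GET() field extraction is replaced by parameters with illustrative names:

	#include <linux/types.h>

	static u32 flow_counter_bulk_size(u32 log_bulk, u32 bulk_128)
	{
		if (log_bulk)
			return 1U << log_bulk;	/* e.g. log_bulk = 7 -> 128 counters */
		return 128U * bulk_128;		/* legacy 128-counter granularity */
	}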
/linux-master/drivers/tty/
sysrq.c
1160 bool bulk = false; local
1170 bulk = true;
1174 if (!bulk)
/linux-master/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_dev.c
340 !waiter->bulk_waiter.bulk) {
341 if (waiter->bulk_waiter.bulk) {
344 waiter->bulk_waiter.bulk->userdata = NULL;
vchiq_core.h
124 int local_insert; /* Where to insert the next local bulk */
125 int remote_insert; /* Where to insert the next remote bulk (master) */
410 struct vchiq_bulk *bulk; member in struct:bulk_waiter
418 * is better to use a bulk transfer
520 int vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
523 void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk);
vchiq_core.c
1270 get_bulk_reason(struct vchiq_bulk *bulk) argument
1272 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1273 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1279 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1285 /* Called by the slot handler - don't hold the bulk mutex */
1301 struct vchiq_bulk *bulk = local
1305 * Only generate callbacks for non-dummy bulk
1308 if (bulk->data && service->instance) {
1309 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1310 if (bulk
1430 struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)]; local
1718 struct vchiq_bulk *bulk; local
2979 struct vchiq_bulk *bulk; local
[all...]
vchiq_arm.c
627 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset, argument
640 bulk->data = pagelistinfo->dma_addr;
646 bulk->remote_data = pagelistinfo;
652 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk) argument
654 if (bulk && bulk->remote_data && bulk->actual)
655 free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
656 bulk->actual);
942 struct vchiq_bulk *bulk local
969 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; local
[all...]
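The vchiq_core.c hits at 1270-1279 map a completed bulk to a callback reason from its direction and its actual length. A reconstruction of that mapping; only VCHIQ_BULK_TRANSMIT and VCHIQ_BULK_ACTUAL_ABORTED appear in the hits, the reason names are assumed from vchiq_if.h, and this compiles only inside the vchiq tree:

	#include "vchiq_core.h"	/* staging-internal header */

	static enum vchiq_reason get_bulk_reason(struct vchiq_bulk *bulk)
	{
		if (bulk->dir == VCHIQ_BULK_TRANSMIT)
			return (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) ?
				VCHIQ_BULK_TRANSMIT_ABORTED : VCHIQ_BULK_TRANSMIT_DONE;

		return (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) ?
			VCHIQ_BULK_RECEIVE_ABORTED : VCHIQ_BULK_RECEIVE_DONE;
	}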
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
fs_counters.c
42 /* Max number of counters to query in bulk read is 32K */
65 struct mlx5_fc_bulk *bulk; member in struct:mlx5_fc
191 /* first id must be aligned to 4 when using bulk query */
201 mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
230 if (counter->bulk)
252 "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
265 "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
545 static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk, argument
548 counter->bulk = bulk;
552 mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk) argument
560 struct mlx5_fc_bulk *bulk; local
600 mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk) argument
614 mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk) argument
625 mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc) argument
653 struct mlx5_fc_bulk *bulk; local
684 mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk) argument
698 struct mlx5_fc_bulk *bulk; local
746 struct mlx5_fc_bulk *bulk = fc->bulk; local
[all...]
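Two constraints surface in the fs_counters.c comments: bulk reads top out at 32K counters, and the first queried id must be aligned to 4. A one-line sketch of satisfying the alignment (helper name is hypothetical):

	#include <linux/types.h>

	/* Round a counter id down to the 4-alignment bulk queries require. */
	static u32 bulk_query_base_id(u32 first_id)
	{
		return first_id & ~(u32)0x3;
	}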
/linux-master/drivers/media/usb/uvc/
uvc_video.c
1021 * uvc_video_decode_start is called with URB data at the start of a bulk or
1039 * uvc_video_decode_end is called with header data at the end of a bulk or
1254 * uvc_video_encode_data(). Only bulk transfers are currently supported.
1283 nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size,
1472 if (urb->actual_length == 0 && stream->bulk.header_size == 0)
1477 stream->bulk.payload_size += len;
1483 if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) {
1492 stream->bulk
[all...]
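The uvc_video.c hits show the accounting used to reassemble one video payload from several bulk URBs: each chunk is clamped against the remaining payload budget, accumulated, and the payload ends when a new header turns up. An illustrative sketch of the clamping step; the struct is a hypothetical stand-in for stream->bulk:

	#include <linux/minmax.h>
	#include <linux/types.h>

	struct my_bulk_state {
		u32 max_payload_size;	/* negotiated maximum payload size */
		u32 payload_size;	/* bytes accumulated so far */
	};

	/* Account one URB chunk against the current payload. */
	static u32 my_bulk_take(struct my_bulk_state *s, u32 len)
	{
		u32 nbytes = min(s->max_payload_size - s->payload_size, len);

		s->payload_size += nbytes;
		return nbytes;	/* bytes belonging to this payload */
	}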

Completed in 407 milliseconds
