Searched refs:work (Results 201 - 225 of 3039) sorted by relevance


/linux-master/drivers/md/
dm-cache-policy-internal.h
25 struct policy_work **work)
28 *work = NULL;
32 return p->lookup_with_work(p, oblock, cblock, data_dir, fast_copy, work);
42 struct policy_work *work,
45 return p->complete_background_work(p, work, success);
22 policy_lookup_with_work(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock, int data_dir, bool fast_copy, struct policy_work **work) argument
41 policy_complete_background_work(struct dm_cache_policy *p, struct policy_work *work, bool success) argument
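The two wrappers above are the usual thin dispatch layer over a policy ops table. A minimal sketch of the same idiom, using hypothetical sample_* names rather than the dm-cache types:

#include <linux/types.h>

struct sample_work;

/* Hypothetical ops table; only the shape mirrors the wrappers above. */
struct sample_policy {
	int (*lookup_with_work)(struct sample_policy *p,
				struct sample_work **work);
	void (*complete_background_work)(struct sample_policy *p,
					 struct sample_work *work,
					 bool success);
};

static inline int sample_lookup_with_work(struct sample_policy *p,
					  struct sample_work **work)
{
	*work = NULL;	/* caller sees NULL if no background work is produced */
	return p->lookup_with_work(p, work);
}

static inline void sample_complete_background_work(struct sample_policy *p,
						   struct sample_work *work,
						   bool success)
{
	p->complete_background_work(p, work, success);
}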
/linux-master/include/trace/events/
sched.h
56 * sched_kthread_work_queue_work - called when a work gets queued
58 * @work: pointer to struct kthread_work
60 * This event occurs when a work is queued immediately or once a
61 * delayed work is actually queued (ie: once the delay has been
67 struct kthread_work *work),
69 TP_ARGS(worker, work),
72 __field( void *, work )
78 __entry->work = work;
79 __entry->function = work->func;
[all...]
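The sched.h hit is a tracepoint that records the worker, the work pointer, and the queued callback. A sketch of how such a TRACE_EVENT is laid out (the surrounding trace-header boilerplate such as TRACE_SYSTEM and the define_trace.h include is omitted; names are hypothetical):

#include <linux/tracepoint.h>
#include <linux/kthread.h>

TRACE_EVENT(sample_kthread_queue_work,

	TP_PROTO(struct kthread_worker *worker, struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field(void *, work)
		__field(void *, function)
		__field(void *, worker)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;	/* the queued callback */
		__entry->worker = worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);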
/linux-master/drivers/usb/misc/
appledisplay.c
7 * Thanks to Caskey L. Dickson for his work with acdctl.
47 /* table of devices that work with this driver */
70 struct delayed_work work; member in struct:appledisplay
110 schedule_delayed_work(&pdata->work, 0);
186 static void appledisplay_work(struct work_struct *work) argument
189 container_of(work, struct appledisplay, work.work);
198 schedule_delayed_work(&pdata->work, HZ / 8);
231 INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
[all...]
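appledisplay.c shows the recurring delayed-work idiom: the handler recovers its driver state with container_of, then re-arms itself. A minimal self-polling sketch (hypothetical sample_* names):

#include <linux/workqueue.h>

struct sample_display {
	struct delayed_work work;
};

static void sample_display_work(struct work_struct *work)
{
	/* A delayed_work embeds a work_struct as .work, hence work.work. */
	struct sample_display *pdata =
		container_of(work, struct sample_display, work.work);

	/* ... poll the hardware ... */

	/* re-arm, roughly 8 times a second as the driver above does */
	schedule_delayed_work(&pdata->work, HZ / 8);
}

/* probe-time setup: run the first poll immediately */
static void sample_display_start(struct sample_display *pdata)
{
	INIT_DELAYED_WORK(&pdata->work, sample_display_work);
	schedule_delayed_work(&pdata->work, 0);
}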
/linux-master/drivers/infiniband/hw/erdma/
erdma_cm.c
120 struct erdma_cm_work *work; local
123 work = list_entry(w, struct erdma_cm_work, list);
124 list_del(&work->list);
125 kfree(work);
133 if (cancel_delayed_work(&cep->mpa_timer->work)) {
142 static void erdma_put_work(struct erdma_cm_work *work) argument
144 INIT_LIST_HEAD(&work->list);
145 spin_lock_bh(&work->cep->lock);
146 list_add(&work->list, &work
202 struct erdma_cm_work *work = NULL; local
217 struct erdma_cm_work *work; local
740 struct erdma_cm_work *work; local
894 struct erdma_cm_work *work = erdma_get_work(cep); local
[all...]
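erdma_cm.c keeps connection work items on a per-endpoint free list instead of freeing them after each use. A sketch of that recycle idiom under a bottom-half-safe lock (hypothetical types):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct sample_ep {
	spinlock_t lock;
	struct list_head work_freelist;
};

struct sample_cm_work {
	struct list_head list;
	struct sample_ep *ep;
};

/* Return a finished work item to its endpoint's free list. */
static void sample_put_work(struct sample_cm_work *work)
{
	INIT_LIST_HEAD(&work->list);
	spin_lock_bh(&work->ep->lock);
	list_add(&work->list, &work->ep->work_freelist);
	spin_unlock_bh(&work->ep->lock);
}

/* Teardown: drain and free whatever is left on the list. */
static void sample_free_works(struct sample_ep *ep)
{
	struct sample_cm_work *work, *tmp;

	list_for_each_entry_safe(work, tmp, &ep->work_freelist, list) {
		list_del(&work->list);
		kfree(work);
	}
}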
/linux-master/drivers/gpu/drm/msm/
msm_gpu_devfreq.c
130 static void msm_devfreq_boost_work(struct kthread_work *work);
131 static void msm_devfreq_idle_work(struct kthread_work *work);
202 kthread_cancel_work_sync(&df->idle_work.work);
208 kthread_cancel_work_sync(&df->boost_work.work);
256 static void msm_devfreq_boost_work(struct kthread_work *work) argument
258 struct msm_gpu_devfreq *df = container_of(work,
259 struct msm_gpu_devfreq, boost_work.work);
315 * We could have become active again before the idle work had a
335 static void msm_devfreq_idle_work(struct kthread_work *work) argument
337 struct msm_gpu_devfreq *df = container_of(work,
[all...]
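msm_gpu_devfreq.c uses kthread_work rather than the system workqueue, so its handlers run on the driver's own kthread_worker. A minimal sketch of that API (hypothetical names):

#include <linux/kthread.h>

struct sample_devfreq {
	struct kthread_work boost_work;
};

static void sample_boost_work(struct kthread_work *work)
{
	struct sample_devfreq *df =
		container_of(work, struct sample_devfreq, boost_work);

	/* runs on the dedicated worker thread; sleeping is fine */
	(void)df;
}

static void sample_devfreq_init(struct sample_devfreq *df)
{
	kthread_init_work(&df->boost_work, sample_boost_work);
}

static void sample_devfreq_kick(struct kthread_worker *worker,
				struct sample_devfreq *df)
{
	kthread_queue_work(worker, &df->boost_work);
}

static void sample_devfreq_fini(struct sample_devfreq *df)
{
	/* mirror of the cancel calls in the hits above */
	kthread_cancel_work_sync(&df->boost_work);
}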
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
hv_vhca.c
70 static void mlx5_hv_vhca_invalidate_work(struct work_struct *work) argument
76 hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
99 struct mlx5_hv_vhca_work *work; local
101 work = kzalloc(sizeof(*work), GFP_ATOMIC);
102 if (!work)
105 INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
106 work->hv_vhca = hv_vhca;
107 work->block_mask = block_mask;
109 queue_work(hv_vhca->work_queue, &work->invalidate_work);
[all...]
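hv_vhca.c allocates a one-shot work item with GFP_ATOMIC so it can be queued from atomic context; the handler frees it when done. A sketch (hypothetical names):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct sample_invalidate {
	struct work_struct invalidate_work;
	u64 block_mask;
};

static void sample_invalidate_fn(struct work_struct *work)
{
	struct sample_invalidate *w =
		container_of(work, struct sample_invalidate, invalidate_work);

	/* ... process w->block_mask in process context ... */
	kfree(w);	/* one-shot item frees itself */
}

static void sample_invalidate(struct workqueue_struct *wq, u64 block_mask)
{
	struct sample_invalidate *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;

	INIT_WORK(&w->invalidate_work, sample_invalidate_fn);
	w->block_mask = block_mask;
	queue_work(wq, &w->invalidate_work);
}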
/linux-master/drivers/media/platform/mediatek/mdp3/
mtk-mdp3-vpu.c
35 if (!vpu->work) {
36 vpu->work = dma_alloc_wc(dev, vpu->work_size,
38 if (!vpu->work)
52 dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
53 vpu->work = NULL;
73 if (vpu->work && vpu->work_addr)
74 dma_free_wc(dev, vpu->work_size, vpu->work, vpu->work_addr);
224 "VPU param:%pK pa:%pad sz:%zx, work:%pK pa:%pad sz:%zx, config:%pK pa:%pad sz:%zx",
226 vpu->work, &vpu->work_addr, vpu->work_size,
274 memset(vpu->work,
[all...]
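Here "work" is not a work item at all but a write-combined DMA work buffer, allocated once and guarded against double alloc/free. A sketch of that pairing (hypothetical sample_vpu type):

#include <linux/dma-mapping.h>

struct sample_vpu {
	struct device *dev;
	void *work;		/* CPU address of the work buffer */
	dma_addr_t work_addr;	/* device address */
	size_t work_size;
};

static int sample_vpu_alloc(struct sample_vpu *vpu)
{
	if (vpu->work)
		return 0;	/* already allocated */

	vpu->work = dma_alloc_wc(vpu->dev, vpu->work_size,
				 &vpu->work_addr, GFP_KERNEL);
	return vpu->work ? 0 : -ENOMEM;
}

static void sample_vpu_free(struct sample_vpu *vpu)
{
	if (vpu->work && vpu->work_addr)
		dma_free_wc(vpu->dev, vpu->work_size,
			    vpu->work, vpu->work_addr);
	vpu->work = NULL;
}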
/linux-master/fs/smb/server/
smb2misc.c
368 int ksmbd_smb2_check_message(struct ksmbd_work *work) argument
370 struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
374 __u32 len = get_rfc1002_len(work->request_buf);
377 if ((u64)work->next_smb2_rcv_hdr_off + next_cmd > len) {
385 else if (work->next_smb2_rcv_hdr_off)
386 len -= work->next_smb2_rcv_hdr_off;
463 if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
464 smb2_validate_credit_charge(work->conn, hdr))
470 int smb2_negotiate_request(struct ksmbd_work *work) argument
472 return ksmbd_smb_negotiate_common(work, SMB2_NEGOTIATE_H
[all...]
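The check at line 377 above guards a compound-request walk: the next-command offset from each PDU header must stay inside the received length. A bounds-check sketch of that step, with a hypothetical PDU layout rather than the real SMB2 header:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical chained-PDU header: next_command is 0 on the last PDU. */
struct sample_pdu {
	__le32 next_command;
};

static int sample_check_compound(const u8 *buf, u32 len, u32 hdr_off)
{
	const struct sample_pdu *pdu =
		(const struct sample_pdu *)(buf + hdr_off);
	u64 next_cmd = le32_to_cpu(pdu->next_command);

	/* widen before adding so the sum cannot wrap */
	if ((u64)hdr_off + next_cmd > len)
		return -EINVAL;	/* next PDU would run past the buffer */
	return 0;
}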
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_display.c
51 * amdgpu_display_hotplug_work_func - work handler for display hotplug event
53 * @work: work struct pointer
55 * This is the hotplug event work handler (all ASICs).
56 * The work gets scheduled from the IRQ handler if there
62 * from the IRQ handler to a work handler because hotplug handler has to use
66 void amdgpu_display_hotplug_work_func(struct work_struct *work) argument
68 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
69 hotplug_work.work);
93 struct amdgpu_flip_work *work; local
100 amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work, struct dma_fence **f) argument
122 struct amdgpu_flip_work *work = local
164 amdgpu_crtc->crtc_id, amdgpu_crtc, work); local
173 struct amdgpu_flip_work *work = local
200 struct amdgpu_flip_work *work; local
[all...]
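The amdgpu comment spells out why hotplug handling is bounced from the IRQ handler to a work item: the handler needs sleeping locks. A sketch of that deferral (hypothetical names):

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct sample_adev {
	struct work_struct hotplug_work;
	struct mutex mode_lock;
};

static void sample_hotplug_work(struct work_struct *work)
{
	struct sample_adev *adev =
		container_of(work, struct sample_adev, hotplug_work);

	/* sleeping lock: the reason this cannot run in the IRQ handler */
	mutex_lock(&adev->mode_lock);
	/* ... re-detect connectors ... */
	mutex_unlock(&adev->mode_lock);
}

static irqreturn_t sample_hotplug_irq(int irq, void *arg)
{
	struct sample_adev *adev = arg;

	schedule_work(&adev->hotplug_work);	/* defer, cannot sleep here */
	return IRQ_HANDLED;
}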
/linux-master/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm_hdcp.c
152 static void link_lock(struct hdcp_workqueue *work, bool lock) argument
156 for (i = 0; i < work->max_link; i++) {
158 mutex_lock(&work[i].mutex);
160 mutex_unlock(&work[i].mutex);
273 static void event_callback(struct work_struct *work) argument
277 hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
292 static void event_property_update(struct work_struct *work) argument
294 struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
363 static void event_property_validate(struct work_struct *work) argument
366 container_of(to_delayed_work(work), struct
415 event_watchdog_timer(struct work_struct *work) argument
436 event_cpirq(struct work_struct *work) argument
620 struct hdcp_workqueue *work; local
644 struct hdcp_workqueue *work; local
[all...]
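link_lock() above takes or releases the mutex of every link in the array in one sweep. A sketch of that bulk lock/unlock helper (hypothetical types):

#include <linux/mutex.h>
#include <linux/types.h>

struct sample_hdcp_link {
	struct mutex mutex;
};

/* Lock or unlock all links, mirroring the loop in link_lock() above. */
static void sample_link_lock(struct sample_hdcp_link *links, int n, bool lock)
{
	int i;

	for (i = 0; i < n; i++) {
		if (lock)
			mutex_lock(&links[i].mutex);
		else
			mutex_unlock(&links[i].mutex);
	}
}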
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ipsec.c
62 container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
556 struct mlx5e_ipsec_work *work = local
557 container_of(_work, struct mlx5e_ipsec_work, work);
558 struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
561 attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;
584 struct mlx5e_ipsec_work *work = local
585 container_of(_work, struct mlx5e_ipsec_work, work);
586 struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
587 struct mlx5e_ipsec_netevent_data *data = work->data;
609 struct mlx5e_ipsec_work *work; local
973 struct mlx5e_ipsec_work *work = sa_entry->work; local
[all...]
/linux-master/drivers/hv/
hv_util.c
87 struct work_struct work; member in struct:hibernate_work_context
94 static void send_hibernate_uevent(struct work_struct *work) argument
99 ctx = container_of(work, struct hibernate_work_context, work);
110 INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
180 struct work_struct *work = NULL; local
236 work = &shutdown_work;
242 work = &restart_work;
251 work = &hibernate_context.work;
335 hv_set_host_time(struct work_struct *work) argument
[all...]
/linux-master/security/keys/trusted-keys/
trusted_tpm2.c
37 u8 *work = scratch, *work1; local
54 work = asn1_encode_oid(work, end_work, tpm2key_oid,
65 work = asn1_encode_tag(work, end_work, 0, bool, w - bool);
74 if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE,
80 work = asn1_encode_integer(work, end_work, options->keyhandle);
81 work = asn1_encode_octet_string(work, end_work,
[all...]
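trusted_tpm2.c uses "work" as a cursor into a scratch buffer: each asn1_encode_* helper returns the advanced pointer, or an ERR_PTR that subsequent calls pass through unchanged, so one check at the end covers the whole chain. A sketch (hypothetical wrapper around the real lib/asn1_encoder.c helpers):

#include <linux/asn1_encoder.h>
#include <linux/err.h>

static int sample_encode_key(u8 *scratch, size_t size,
			     u32 keyhandle, const u8 *priv, u32 priv_len)
{
	u8 *work = scratch;
	const u8 *end_work = scratch + size;

	work = asn1_encode_integer(work, end_work, keyhandle);
	work = asn1_encode_octet_string(work, end_work, priv, priv_len);
	if (IS_ERR(work))
		return PTR_ERR(work);	/* one check covers the whole chain */

	return work - scratch;		/* bytes written */
}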
/linux-master/drivers/infiniband/hw/mlx5/
odp.c
80 struct work_struct work; member in struct:mlx5_pagefault
190 static void free_implicit_child_mr_work(struct work_struct *work) argument
193 container_of(work, struct mlx5_ib_mr, odp_destroy.work);
221 /* Freeing a MR is a sleeping operation, so bounce to a work queue */
222 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
223 queue_work(system_unbound_wq, &mr->odp_destroy.work);
1343 * work-queue is being fenced. */
1376 static void mlx5_ib_eqe_pf_action(struct work_struct *work) argument
1378 struct mlx5_pagefault *pfault = container_of(work,
1501 mlx5_ib_eq_pf_action(struct work_struct *work) argument
1643 struct work_struct work; member in struct:prefetch_mr_work
1653 destroy_prefetch_work(struct prefetch_mr_work *work) argument
1704 struct prefetch_mr_work *work = local
1724 init_prefetch_work(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 pf_flags, struct prefetch_mr_work *work, struct ib_sge *sg_list, u32 num_sge) argument
1783 struct prefetch_mr_work *work; local
[all...]
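odp.c's comment notes that freeing an MR sleeps, so destruction from a non-sleeping context is bounced to system_unbound_wq. A sketch of that bounce (hypothetical sample_mr):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct sample_mr {
	struct work_struct destroy_work;
};

static void sample_free_mr_work(struct work_struct *work)
{
	struct sample_mr *mr =
		container_of(work, struct sample_mr, destroy_work);

	/* sleeping teardown is safe on the unbound workqueue */
	kfree(mr);
}

/* May be called from a context that cannot sleep. */
static void sample_destroy_mr(struct sample_mr *mr)
{
	INIT_WORK(&mr->destroy_work, sample_free_mr_work);
	queue_work(system_unbound_wq, &mr->destroy_work);
}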
/linux-master/sound/soc/codecs/
pcm1789.c
31 struct work_struct work; member in struct:pcm1789_private
131 static void pcm1789_work_queue(struct work_struct *work) argument
133 struct pcm1789_private *priv = container_of(work,
135 work);
154 schedule_work(&priv->work);
254 INIT_WORK(&pcm1789->work, pcm1789_work_queue);
265 flush_work(&priv->work);
/linux-master/drivers/input/misc/
regulator-haptic.c
25 struct work_struct work; member in struct:regulator_haptic
81 static void regulator_haptic_work(struct work_struct *work) argument
83 struct regulator_haptic *haptic = container_of(work,
84 struct regulator_haptic, work);
103 schedule_work(&haptic->work);
112 cancel_work_sync(&haptic->work);
157 INIT_WORK(&haptic->work, regulator_haptic_work);
drv2665.c
50 * @work: Work item used to off load the enable/disable of the vibration
57 struct work_struct work; member in struct:drv2665_data
76 static void drv2665_worker(struct work_struct *work) argument
79 container_of(work, struct drv2665_data, work);
108 schedule_work(&haptics->work);
118 cancel_work_sync(&haptics->work);
196 INIT_WORK(&haptics->work, drv2665_worker);
pm8xxx-vibrator.c
70 * @work: work structure to set the vibration parameters
83 struct work_struct work; member in struct:pm8xxx_vib
137 * @work: pointer to work_struct
139 static void pm8xxx_work_handler(struct work_struct *work) argument
141 struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work);
175 cancel_work_sync(&vib->work);
197 schedule_work(&vib->work);
222 INIT_WORK(&vib->work, pm8xxx_work_handler);
[all...]
/linux-master/drivers/gpu/drm/i915/gt/uc/
intel_gsc_uc.c
16 static void gsc_work(struct work_struct *work) argument
18 struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
128 INIT_WORK(&gsc->work, gsc_work);
166 * guaranteed to always work as long as the GPU itself is awake (which
258 flush_work(&gsc->work);
279 flush_work(&gsc->work);
315 queue_work(gsc->wq, &gsc->work);
/linux-master/mm/
page_reporting.c
79 * Delay the start of work to allow a sizable queue to build. For
83 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
307 static void page_reporting_process(struct work_struct *work) argument
309 struct delayed_work *d_work = to_delayed_work(work);
311 container_of(d_work, struct page_reporting_dev_info, work);
346 schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
379 /* initialize state and work structures */
381 INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);
411 /* Flush any existing work, and lock it out */
412 cancel_delayed_work_sync(&prdev->work);
[all...]
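page_reporting.c shows the two-hop recovery for delayed work: to_delayed_work() gets the delayed_work back from the work_struct, then container_of() gets the owning structure. A sketch (hypothetical names):

#include <linux/workqueue.h>

#define SAMPLE_REPORTING_DELAY	(2 * HZ)

struct sample_reporting_dev_info {
	struct delayed_work work;
};

static void sample_reporting_process(struct work_struct *work)
{
	struct delayed_work *d_work = to_delayed_work(work);
	struct sample_reporting_dev_info *prdev =
		container_of(d_work, struct sample_reporting_dev_info, work);

	/* ... report pages ... then re-arm in case more build up */
	schedule_delayed_work(&prdev->work, SAMPLE_REPORTING_DELAY);
}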
/linux-master/kernel/irq/
irq_sim.c
15 struct irq_work work; member in struct:irq_sim_work_ctx
81 irq_work_queue(&irq_ctx->work_ctx->work);
100 static void irq_sim_handle_irq(struct irq_work *work) argument
106 work_ctx = container_of(work, struct irq_sim_work_ctx, work);
184 work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
201 irq_work_sync(&work_ctx->work);
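irq_sim.c uses irq_work, which runs its callback in hard-IRQ context and can be raised from almost any context. A sketch of the init/queue/sync trio seen above (hypothetical names):

#include <linux/irq_work.h>

struct sample_sim_ctx {
	struct irq_work work;
};

static void sample_handle_irq(struct irq_work *work)
{
	struct sample_sim_ctx *ctx =
		container_of(work, struct sample_sim_ctx, work);

	/* hard-IRQ context: keep it short, never sleep */
	(void)ctx;
}

static void sample_sim_setup_and_fire(struct sample_sim_ctx *ctx)
{
	ctx->work = IRQ_WORK_INIT_HARD(sample_handle_irq);
	irq_work_queue(&ctx->work);	/* raise from any context */
}

static void sample_sim_teardown(struct sample_sim_ctx *ctx)
{
	irq_work_sync(&ctx->work);	/* wait out a pending callback */
}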
/linux-master/drivers/gpu/drm/xe/
xe_devcoredump.c
49 * at least while the coredump device is alive. Dev_coredump has a delayed work
66 static void xe_devcoredump_deferred_snap_work(struct work_struct *work) argument
68 struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
95 /* Ensure delayed work is captured before continuing */
96 flush_work(&ss->work);
142 cancel_work_sync(&coredump->snapshot.work);
176 INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
206 queue_work(system_unbound_wq, &ss->work);
/linux-master/drivers/platform/x86/
lenovo-yoga-tab2-pro-1380-fastcharger.c
45 struct work_struct work; member in struct:yt2_1380_fc
74 static void yt2_1380_fc_worker(struct work_struct *work) argument
76 struct yt2_1380_fc *fc = container_of(work, struct yt2_1380_fc, work);
132 schedule_work(&fc->work);
163 INIT_WORK(&fc->work, yt2_1380_fc_worker);
216 schedule_work(&fc->work);
/linux-master/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
btcoex.c
61 * @work: DHCP state machine work
83 struct work_struct work; member in struct:brcmf_btcoex_info
279 schedule_work(&bt_local->work);
283 * brcmf_btcoex_handler() - BT coex state machine work handler
284 * @work: work
286 static void brcmf_btcoex_handler(struct work_struct *work) argument
289 btci = container_of(work, struct brcmf_btcoex_info, work);
[all...]
/linux-master/drivers/isdn/mISDN/
timerdev.c
32 u_int work; member in struct:mISDNtimerdev
57 dev->work = 0;
108 while (list_empty(list) && (dev->work == 0)) {
112 wait_event_interruptible(dev->wait, (dev->work ||
118 if (dev->work)
119 dev->work = 0;
146 if (dev->work || !list_empty(&dev->expired))
149 printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
150 dev->work, list_empty(&dev->expired));
175 dev->work
[all...]
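timerdev.c's "work" is just a wake-up flag paired with a wait queue; readers block until the flag is raised or a timer lands on the expired list. A sketch of the wait/consume pattern (hypothetical names):

#include <linux/wait.h>
#include <linux/list.h>
#include <linux/types.h>

struct sample_timerdev {
	u_int work;
	wait_queue_head_t wait;
	struct list_head expired;
};

/* Block until there is something to report. */
static int sample_timer_wait(struct sample_timerdev *dev)
{
	int ret;

	ret = wait_event_interruptible(dev->wait,
			dev->work || !list_empty(&dev->expired));
	if (ret)
		return ret;		/* interrupted by a signal */

	if (dev->work)
		dev->work = 0;		/* consume the flag */
	return 0;
}

/* Producer side: raise the flag and wake readers. */
static void sample_timer_kick(struct sample_timerdev *dev)
{
	dev->work = 1;
	wake_up_interruptible(&dev->wait);
}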

Completed in 1321 milliseconds
