Lines Matching defs:hdev

35 struct hid_device *hdev;
75 static int hid_bpf_program_count(struct hid_device *hdev,
90 if (hdev && entry->hdev != hdev)
107 int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
114 prog_list = rcu_dereference(hdev->bpf.progs[type]);
142 static void __hid_bpf_set_hdev_progs(struct hid_device *hdev, struct hid_bpf_prog_list *new_list,
147 spin_lock(&hdev->bpf.progs_lock);
148 old_list = rcu_dereference_protected(hdev->bpf.progs[type],
149 lockdep_is_held(&hdev->bpf.progs_lock));
150 rcu_assign_pointer(hdev->bpf.progs[type], new_list);
151 spin_unlock(&hdev->bpf.progs_lock);
162 static int hid_bpf_populate_hdev(struct hid_device *hdev, enum hid_bpf_prog_type type)
167 if (type >= HID_BPF_PROG_TYPE_MAX || !hdev)
170 if (hdev->bpf.destroyed)
180 if (entry->type == type && entry->hdev == hdev &&
185 __hid_bpf_set_hdev_progs(hdev, new_list, type);
215 struct hid_device *hdev;
221 if (entry->hdev) {
222 hdev = entry->hdev;
225 * hdev is still valid, even if we are called after hid_destroy_device():
229 hdev_destroyed = hdev->bpf.destroyed;
231 hid_bpf_populate_hdev(hdev, type);
233 /* mark all other disabled progs from hdev of the given type as detached */
242 if (next->hdev == hdev && next->type == type) {
244 * clear the hdev reference and decrement the device ref
248 next->hdev = NULL;
249 put_device(&hdev->dev);
255 hid_bpf_reconnect(hdev);
397 __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
417 cnt = hid_bpf_program_count(hdev, NULL, prog_type);
448 prog_entry->hdev = hdev;
452 err = hid_bpf_populate_hdev(hdev, prog_type);
476 void __hid_bpf_destroy_device(struct hid_device *hdev)
484 prog_list = rcu_dereference(hdev->bpf.progs[type]);
496 __hid_bpf_set_hdev_progs(hdev, NULL, type);
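
The lines at 142-151 and 107-114 above show the locking scheme around the per-device program lists in the HID-BPF dispatch code: writers serialize on hdev->bpf.progs_lock and publish a replacement list with rcu_assign_pointer(), while the dispatch path only takes an RCU read lock. The snippet below is a minimal, self-contained sketch of that pattern; struct hid_bpf_dev, set_progs() and count_progs() are hypothetical stand-ins rather than the kernel's actual helpers, and the list contents and the point where the old list is freed are assumptions.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct hid_bpf_prog_list {
	u8 prog_idx[32];	/* assumed contents */
	u8 prog_cnt;
};

/* stand-in for the hid_device->bpf part touched at lines 147-151 */
struct hid_bpf_dev {
	struct hid_bpf_prog_list __rcu *progs;
	spinlock_t progs_lock;	/* serializes writers; spin_lock_init() assumed done */
};

/* Writer: publish the new list, then free the old one after a grace period. */
static void set_progs(struct hid_bpf_dev *bpf, struct hid_bpf_prog_list *new_list)
{
	struct hid_bpf_prog_list *old_list;

	spin_lock(&bpf->progs_lock);
	old_list = rcu_dereference_protected(bpf->progs,
					     lockdep_is_held(&bpf->progs_lock));
	rcu_assign_pointer(bpf->progs, new_list);
	spin_unlock(&bpf->progs_lock);

	if (old_list) {
		synchronize_rcu();	/* wait for readers still using the old list */
		kfree(old_list);
	}
}

/* Reader: runs under rcu_read_lock(), as hid_bpf_prog_run() does at line 114. */
static int count_progs(struct hid_bpf_dev *bpf)
{
	struct hid_bpf_prog_list *list;
	int cnt = 0;

	rcu_read_lock();
	list = rcu_dereference(bpf->progs);
	if (list)
		cnt = list->prog_cnt;
	rcu_read_unlock();

	return cnt;
}

Readers never take the spinlock, so program dispatch stays lock-free; only attach/detach, which rebuild the list, pay for the synchronize_rcu() grace period.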
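
Lines 215-249 and 448 hint at the reference counting that keeps entry->hdev usable even after hid_destroy_device(): the attach path stores the device pointer (and, per the put_device() at line 249, presumably takes a reference on it), and the detach path clears the pointer before dropping that reference. A hedged sketch of the pairing, with hypothetical attach_entry()/detach_entry() helpers and an assumed struct prog_entry layout:

#include <linux/device.h>
#include <linux/hid.h>

struct prog_entry {			/* assumed shape, cf. line 35 */
	struct hid_device *hdev;
};

/* Attach side (cf. line 448): pin the device so entry->hdev stays valid
 * even if the HID device goes away before the program is released.
 */
static void attach_entry(struct prog_entry *entry, struct hid_device *hdev)
{
	entry->hdev = hdev;
	get_device(&hdev->dev);
}

/* Detach side (cf. lines 248-249): clear the pointer, then drop the
 * reference taken at attach time so the device can finally be freed.
 */
static void detach_entry(struct prog_entry *entry)
{
	struct hid_device *hdev = entry->hdev;

	if (!hdev)
		return;

	entry->hdev = NULL;
	put_device(&hdev->dev);
}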