Lines Matching defs:hdev

31 #define DBG_UNEXPECTED_STATE() \
32 	bt_dev_dbg(hdev, \
33 		   "Unexpected packet (%d) for state (%d). ", \
34 		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)
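
The pkt_type compared by this debug macro comes from a file-local enum; quoted here from the same file for context (verify against your tree, the values may have shifted):

enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

hci_dmp_cb(skb) is a cast of skb->cb to the file's private control-block struct, which carries this pkt_type alongside each queued skb.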
51 static int hci_devcd_update_state(struct hci_dev *hdev, int state)
53 	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
54 		   hdev->dump.state, state);
56 	hdev->dump.state = state;
58 	return hci_devcd_update_hdr_state(hdev->dump.head,
59 					  hdev->dump.alloc_size, state);
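
The state values cycled here are declared in include/net/bluetooth/hci_core.h; reproduced from memory for reference (verify against your tree):

enum devcoredump_state {
	HCI_DEVCOREDUMP_IDLE,
	HCI_DEVCOREDUMP_ACTIVE,
	HCI_DEVCOREDUMP_DONE,
	HCI_DEVCOREDUMP_ABORT,
	HCI_DEVCOREDUMP_TIMEOUT,
};

Besides updating hdev->dump.state, the helper hands the buffer head and its size to hci_devcd_update_hdr_state(), which rewrites the state recorded in the already-written dump header, apparently so the emitted dump records how the capture ended.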
62 static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
72 	if (hdev->dump.dmp_hdr)
73 		hdev->dump.dmp_hdr(hdev, skb);
81 static void hci_devcd_notify(struct hci_dev *hdev, int state)
83 	if (hdev->dump.notify_change)
84 		hdev->dump.notify_change(hdev, state);
88 void hci_devcd_reset(struct hci_dev *hdev)
90 	hdev->dump.head = NULL;
91 	hdev->dump.tail = NULL;
92 	hdev->dump.alloc_size = 0;
94 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);
96 	cancel_delayed_work(&hdev->dump.dump_timeout);
97 	skb_queue_purge(&hdev->dump.dump_q);
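
Everything reset here lives in hdev->dump, the struct hci_devcoredump embedded in struct hci_dev under CONFIG_DEV_COREDUMP. A sketch reconstructed purely from the accesses visible in this listing; the field names are real, the types and ordering are approximate:

struct hci_devcoredump {
	bool			supported;	/* set by hci_devcd_register() */
	int			state;		/* enum devcoredump_state */
	unsigned long		timeout;	/* watchdog delay, in jiffies */

	struct sk_buff_head	dump_q;		/* packets awaiting dump_rx */
	struct work_struct	dump_rx;	/* consumer work item */
	struct delayed_work	dump_timeout;	/* watchdog, armed on INIT */

	char			*head;		/* vmalloc'd dump buffer */
	char			*tail;		/* next write position */
	char			*end;		/* head + alloc_size */
	u32			alloc_size;	/* header + requested dump size */

	coredump_t		coredump;	/* driver callbacks */
	dmp_hdr_t		dmp_hdr;
	notify_change_t		notify_change;
};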
101 static void hci_devcd_free(struct hci_dev *hdev)
103 	vfree(hdev->dump.head);
105 	hci_devcd_reset(hdev);
109 static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
111 	hdev->dump.head = vmalloc(size);
112 	if (!hdev->dump.head)
115 	hdev->dump.alloc_size = size;
116 	hdev->dump.tail = hdev->dump.head;
117 	hdev->dump.end = hdev->dump.head + size;
119 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);
125 static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
127 	if (hdev->dump.tail + size > hdev->dump.end)
130 	memcpy(hdev->dump.tail, buf, size);
131 	hdev->dump.tail += size;
137 static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
139 	if (hdev->dump.tail + len > hdev->dump.end)
142 	memset(hdev->dump.tail, pattern, len);
143 	hdev->dump.tail += len;
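
hci_devcd_copy() and hci_devcd_memset() share one bounds invariant on the linear dump buffer; sketched as a comment for orientation (the names are the real hdev->dump fields):

/*
 *   head                    tail                   end
 *    |<--- bytes written --->|<---- free space ---->|
 *    |<----------------- alloc_size --------------->|
 *
 * Both helpers return false and write nothing if a request would push
 * tail past end; callers log the failure and drop the fragment rather
 * than overrun the allocation.
 */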
149 static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
159 	dump_hdr_size = hci_devcd_mkheader(hdev, skb);
161 	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
167 	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
168 		bt_dev_err(hdev, "Failed to insert header");
169 		hci_devcd_free(hdev);
181 static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
185 	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
191 		bt_dev_dbg(hdev, "Invalid dump init pkt");
197 		bt_dev_err(hdev, "Zero size dump init pkt");
201 	if (hci_devcd_prepare(hdev, dump_size)) {
202 		bt_dev_err(hdev, "Failed to prepare for dump");
206 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
207 	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
208 			   hdev->dump.timeout);
211 static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
213 	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
218 	if (!hci_devcd_copy(hdev, skb->data, skb->len))
219 		bt_dev_dbg(hdev, "Failed to insert skb");
222 static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
223 					 struct sk_buff *skb)
227 	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
233 		bt_dev_dbg(hdev, "Invalid pattern skb");
239 	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
240 		bt_dev_dbg(hdev, "Failed to set pattern");
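
The pattern handler treats skb->data as a small packed payload. Judging from the pattern->pattern and pattern->len accesses above, and the "Invalid pattern skb" length check, it has roughly this shape; the field names are grounded in the listing, the exact declaration is assumed:

struct hci_devcoredump_skb_pattern {
	u8  pattern;	/* byte value to repeat */
	u32 len;	/* number of bytes to fill */
} __packed;

hci_devcd_memset() then expands it in place, so a driver can describe large zero- or 0xff-filled regions without shipping the bytes over the transport.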
243 static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
244 					  struct sk_buff *skb)
248 	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
253 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
254 	dump_size = hdev->dump.tail - hdev->dump.head;
256 	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
257 		   hdev->dump.alloc_size);
259 	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
262 static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
263 					struct sk_buff *skb)
267 	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
272 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
273 	dump_size = hdev->dump.tail - hdev->dump.head;
275 	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
276 		   hdev->dump.alloc_size);
279 	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
312 static void hci_devcd_rx(struct work_struct *work)
314 	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
318 	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
322 		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
327 		hci_dev_lock(hdev);
328 		start_state = hdev->dump.state;
332 			hci_devcd_handle_pkt_init(hdev, skb);
336 			hci_devcd_handle_pkt_skb(hdev, skb);
340 			hci_devcd_handle_pkt_pattern(hdev, skb);
344 			hci_devcd_handle_pkt_complete(hdev, skb);
348 			hci_devcd_handle_pkt_abort(hdev, skb);
352 			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ",
353 				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
357 		hci_dev_unlock(hdev);
363 		if (start_state != hdev->dump.state)
364 			hci_devcd_notify(hdev, hdev->dump.state);
367 		hci_dev_lock(hdev);
368 		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
369 		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
370 			hci_devcd_reset(hdev);
371 		hci_dev_unlock(hdev);
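
Pulling the five handlers and the dequeue loop together, the observable state machine (a reconstruction from the guards above, not a quote from the file):

/*
 *                PKT_INIT             PKT_COMPLETE
 *   IDLE ------------------> ACTIVE ---------------> DONE   -> reset to IDLE
 *                              |        PKT_ABORT
 *                              +--------------------> ABORT -> reset to IDLE
 *                              |  dump_timeout fires
 *                              +--------------------> TIMEOUT (flushed and
 *                                                      reset by the timeout
 *                                                      handler below)
 *
 * A packet arriving in any other state is dropped with a debug message
 * (DBG_UNEXPECTED_STATE), and hci_devcd_notify() reports every state
 * change to the driver before the buffer is freed.
 */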
376 static void hci_devcd_timeout(struct work_struct *work)
378 	struct hci_dev *hdev = container_of(work, struct hci_dev,
379 					    dump.dump_timeout.work);
382 	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);
384 	hci_dev_lock(hdev);
386 	cancel_work(&hdev->dump.dump_rx);
388 	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);
390 	dump_size = hdev->dump.tail - hdev->dump.head;
391 	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
392 		   hdev->dump.alloc_size);
395 	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
397 	hci_devcd_reset(hdev);
399 	hci_dev_unlock(hdev);
403 int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
404 		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
415 	hci_dev_lock(hdev);
416 	hdev->dump.coredump = coredump;
417 	hdev->dump.dmp_hdr = dmp_hdr;
418 	hdev->dump.notify_change = notify_change;
419 	hdev->dump.supported = true;
420 	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
421 	hci_dev_unlock(hdev);
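
DEVCOREDUMP_TIMEOUT is the core's default watchdog period (10 seconds via msecs_to_jiffies() in current trees). Below is a minimal sketch of how a vendor driver might hook in during setup; the my_* names are hypothetical, while hci_devcd_register(), its callback typedefs (coredump_t, dmp_hdr_t, notify_change_t), skb_put_data() and bt_dev_dbg() are real interfaces. Upstream, hci_devcd_register() rejects a NULL coredump or dmp_hdr with -EINVAL; notify_change may be NULL, as the check in hci_devcd_notify() above shows.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void my_coredump(struct hci_dev *hdev)
{
	/* Ask the controller to emit a dump, e.g. via a vendor HCI
	 * command. Called when userspace writes the device's coredump
	 * sysfs entry.
	 */
}

static void my_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Contribute driver/firmware identification lines to the header. */
	static const char info[] = "Driver: my_driver\n";

	skb_put_data(skb, info, strlen(info));
}

static void my_notify_change(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "devcoredump state -> %d", state);
}

static int my_driver_setup(struct hci_dev *hdev)
{
	return hci_devcd_register(hdev, my_coredump, my_dmp_hdr,
				  my_notify_change);
}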
427 static inline bool hci_devcd_enabled(struct hci_dev *hdev)
429 	return hdev->dump.supported;
432 int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
436 	if (!hci_devcd_enabled(hdev))
446 	skb_queue_tail(&hdev->dump.dump_q, skb);
447 	queue_work(hdev->workqueue, &hdev->dump.dump_rx);
453 int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
458 	if (!hci_devcd_enabled(hdev)) {
465 	skb_queue_tail(&hdev->dump.dump_q, skb);
466 	queue_work(hdev->workqueue, &hdev->dump.dump_rx);
472 int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
477 	if (!hci_devcd_enabled(hdev))
490 	skb_queue_tail(&hdev->dump.dump_q, skb);
491 	queue_work(hdev->workqueue, &hdev->dump.dump_rx);
497 int hci_devcd_complete(struct hci_dev *hdev)
501 	if (!hci_devcd_enabled(hdev))
510 	skb_queue_tail(&hdev->dump.dump_q, skb);
511 	queue_work(hdev->workqueue, &hdev->dump.dump_rx);
517 int hci_devcd_abort(struct hci_dev *hdev)
521 	if (!hci_devcd_enabled(hdev))
530 	skb_queue_tail(&hdev->dump.dump_q, skb);
531 	queue_work(hdev->workqueue, &hdev->dump.dump_rx);
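
Putting the producer side together: a hedged sketch of a driver receive path that streams controller dump fragments into this API. The fragment framing (first/last flags, total size) is an assumed vendor protocol; the hci_devcd_*() calls are the real entry points. As the bodies above show, each call queues a packet to dump_q and kicks the dump_rx work, so ownership of the skb passes to the core on success (hci_devcd_append() frees it itself on the unsupported path).

static int my_recv_dump_frag(struct hci_dev *hdev, struct sk_buff *skb,
			     bool first, bool last, u32 total_size)
{
	int err;

	if (first) {
		/* Queues an INIT packet: the rx work allocates the dump
		 * buffer, writes the header and arms the dump_timeout
		 * watchdog (IDLE -> ACTIVE).
		 */
		err = hci_devcd_init(hdev, total_size);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	/* Queues the fragment as a SKB packet; it is memcpy'd into the
	 * dump buffer by hci_devcd_handle_pkt_skb().
	 */
	err = hci_devcd_append(hdev, skb);
	if (err)
		return err;

	/* A COMPLETE packet flushes the collected data to userspace via
	 * dev_coredumpv() and resets the state machine to IDLE.
	 */
	return last ? hci_devcd_complete(hdev) : 0;
}

On a transport error mid-stream, the driver would call hci_devcd_abort(hdev) instead: as the ABORT handler above shows, the core still emits whatever was collected and then resets to IDLE.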