// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */

#include <linux/devcoredump.h>

#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

struct hci_devcoredump_skb_cb {
	u16 pkt_type;
};

struct hci_devcoredump_skb_pattern {
	u8 pattern;
	u32 len;
} __packed;

#define hci_dmp_cb(skb)	((struct hci_devcoredump_skb_cb *)((skb)->cb))

#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d)", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE	512	/* bytes */

static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
{
	int len = 0;

	if (!buf)
		return 0;

	len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);

	return len + 1; /* scnprintf adds \0 at the end upon state rewrite */
}

/* Call with hci_dev_lock only. */
static int hci_devcd_update_state(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
		   hdev->dump.state, state);

	hdev->dump.state = state;

	return hci_devcd_update_hdr_state(hdev->dump.head,
					  hdev->dump.alloc_size, state);
}

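/* Build the devcoredump header into @skb: the state line, the driver-supplied
 * header from the dmp_hdr() callback, and the start-of-dump marker. Returns
 * the total header length.
 */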
static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
{
	char dump_start[] = "--- Start dump ---\n";
	char hdr[80];
	int hdr_len;

	hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
					     HCI_DEVCOREDUMP_IDLE);
	skb_put_data(skb, hdr, hdr_len);

	if (hdev->dump.dmp_hdr)
		hdev->dump.dmp_hdr(hdev, skb);

	skb_put_data(skb, dump_start, strlen(dump_start));

	return skb->len;
}

/* Do not call with hci_dev_lock since this calls driver code. */
static void hci_devcd_notify(struct hci_dev *hdev, int state)
{
	if (hdev->dump.notify_change)
		hdev->dump.notify_change(hdev, state);
}

/* Call with hci_dev_lock only. */
void hci_devcd_reset(struct hci_dev *hdev)
{
	hdev->dump.head = NULL;
	hdev->dump.tail = NULL;
	hdev->dump.alloc_size = 0;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	cancel_delayed_work(&hdev->dump.dump_timeout);
	skb_queue_purge(&hdev->dump.dump_q);
}

/* Call with hci_dev_lock only. */
static void hci_devcd_free(struct hci_dev *hdev)
{
	vfree(hdev->dump.head);

	hci_devcd_reset(hdev);
}

/* Call with hci_dev_lock only. */
static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
{
	hdev->dump.head = vmalloc(size);
	if (!hdev->dump.head)
		return -ENOMEM;

	hdev->dump.alloc_size = size;
	hdev->dump.tail = hdev->dump.head;
	hdev->dump.end = hdev->dump.head + size;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	return 0;
}

/* Call with hci_dev_lock only. */
static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
{
	if (hdev->dump.tail + size > hdev->dump.end)
		return false;

	memcpy(hdev->dump.tail, buf, size);
	hdev->dump.tail += size;

	return true;
}

/* Call with hci_dev_lock only. */
static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
{
	if (hdev->dump.tail + len > hdev->dump.end)
		return false;

	memset(hdev->dump.tail, pattern, len);
	hdev->dump.tail += len;

	return true;
}

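/* Build the dump header and allocate a dump buffer large enough for the
 * header plus @dump_size bytes of dump data.
 */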
/* Call with hci_dev_lock only. */
static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;
	int dump_hdr_size;
	int err = 0;

	skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dump_hdr_size = hci_devcd_mkheader(hdev, skb);

	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
		err = -ENOMEM;
		goto hdr_free;
	}

	/* Insert the device header */
	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
		bt_dev_err(hdev, "Failed to insert header");
		hci_devcd_free(hdev);

		err = -ENOMEM;
		goto hdr_free;
	}

hdr_free:
	kfree_skb(skb);

	return err;
}

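/* Handle a dump init packet: read the little-endian dump size, allocate the
 * dump buffer, move the state machine to ACTIVE and arm the dump timeout.
 */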
static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(dump_size)) {
		bt_dev_dbg(hdev, "Invalid dump init pkt");
		return;
	}

	dump_size = get_unaligned_le32(skb_pull_data(skb, sizeof(dump_size)));
	if (!dump_size) {
		bt_dev_err(hdev, "Zero size dump init pkt");
		return;
	}

	if (hci_devcd_prepare(hdev, dump_size)) {
		bt_dev_err(hdev, "Failed to prepare for dump");
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
			   hdev->dump.timeout);
}

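/* Handle a dump fragment packet: copy the skb payload into the dump buffer. */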
static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (!hci_devcd_copy(hdev, skb->data, skb->len))
		bt_dev_dbg(hdev, "Failed to insert skb");
}

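/* Handle a pattern packet: fill a region of the dump buffer with a repeated
 * byte pattern instead of carrying the bytes in the packet itself.
 */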
static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_devcoredump_skb_pattern *pattern;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(*pattern)) {
		bt_dev_dbg(hdev, "Invalid pattern skb");
		return;
	}

	pattern = skb_pull_data(skb, sizeof(*pattern));

	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
		bt_dev_dbg(hdev, "Failed to set pattern");
}

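/* Handle a completion packet: mark the dump DONE and hand the collected data
 * over to the devcoredump core.
 */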
static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}

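/* Handle an abort packet: mark the dump ABORT and emit whatever data has been
 * collected so far.
 */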
static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
	dump_size = hdev->dump.tail - hdev->dump.head;

	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}

/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 *      HCI_DEVCOREDUMP_IDLE: The default state.
 *
 *      HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *              been initialized using hci_devcd_init(). Once active, the driver
 *              can append data using hci_devcd_append() or insert a pattern
 *              using hci_devcd_append_pattern().
 *
 *      HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *              can signal the completion using hci_devcd_complete(). A
 *              devcoredump is generated indicating the completion event and
 *              then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *              case of any error using hci_devcd_abort(). A devcoredump is
 *              still generated with the available data indicating the abort
 *              event and then the state machine is reset to the default state.
 *
 *      HCI_DEVCOREDUMP_TIMEOUT: A timer of DEVCOREDUMP_TIMEOUT duration is
 *              started during devcoredump initialization. Once the timeout
 *              occurs, the driver is notified, a devcoredump is generated with
 *              the available data indicating the timeout event and then the
 *              state machine is reset to the default state.
 *
 * The driver must register using hci_devcd_register() before using the HCI
 * devcoredump APIs.
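 *
 * A minimal usage sketch (the foo_* callback names are hypothetical; only the
 * hci_devcd_*() calls below are part of this API):
 *
 *      hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr, NULL);
 *      ...
 *      hci_devcd_init(hdev, dump_size);     state -> HCI_DEVCOREDUMP_ACTIVE
 *      hci_devcd_append(hdev, skb);         append a dump fragment
 *      hci_devcd_complete(hdev);            emit the dump, reset to IDLE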
 */
void hci_devcd_rx(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
	struct sk_buff *skb;
	int start_state;

	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
		/* Return if timeout occurs. The timeout handler function
		 * hci_devcd_timeout() will report the available dump data.
		 */
		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
			kfree_skb(skb);
			return;
		}

		hci_dev_lock(hdev);
		start_state = hdev->dump.state;

		switch (hci_dmp_cb(skb)->pkt_type) {
		case HCI_DEVCOREDUMP_PKT_INIT:
			hci_devcd_handle_pkt_init(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_SKB:
			hci_devcd_handle_pkt_skb(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_PATTERN:
			hci_devcd_handle_pkt_pattern(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_COMPLETE:
			hci_devcd_handle_pkt_complete(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_ABORT:
			hci_devcd_handle_pkt_abort(hdev, skb);
			break;

		default:
			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d)",
				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
			break;
		}

		hci_dev_unlock(hdev);
		kfree_skb(skb);

		/* Notify the driver about any state changes before resetting
		 * the state machine
		 */
		if (start_state != hdev->dump.state)
			hci_devcd_notify(hdev, hdev->dump.state);

		/* Reset the state machine if the devcoredump is complete */
		hci_dev_lock(hdev);
		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
			hci_devcd_reset(hdev);
		hci_dev_unlock(hdev);
	}
}
EXPORT_SYMBOL(hci_devcd_rx);

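/* Worker for the dump timeout: stop further packet processing, emit a
 * devcoredump with the data gathered so far and reset the state machine.
 */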
void hci_devcd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    dump.dump_timeout.work);
	u32 dump_size;

	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	hci_dev_lock(hdev);

	cancel_work(&hdev->dump.dump_rx);

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);

	hci_devcd_reset(hdev);

	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(hci_devcd_timeout);

int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
{
	/* The driver must implement the coredump() and dmp_hdr() functions
	 * for Bluetooth devcoredump. coredump() should trigger a coredump
	 * event on the controller when the device's coredump sysfs entry is
	 * written to. dmp_hdr() should create a dump header identifying the
	 * controller/firmware/driver info.
	 */
	if (!coredump || !dmp_hdr)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->dump.coredump = coredump;
	hdev->dump.dmp_hdr = dmp_hdr;
	hdev->dump.notify_change = notify_change;
	hdev->dump.supported = true;
	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
	hci_dev_unlock(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_register);

static inline bool hci_devcd_enabled(struct hci_dev *hdev)
{
	return hdev->dump.supported;
}

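/* Begin a devcoredump of @dump_size bytes: queues an init packet for
 * hci_devcd_rx() to process.
 */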
int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
	put_unaligned_le32(dump_size, skb_put(skb, sizeof(dump_size)));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_init);

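/* Append @skb to the ongoing devcoredump; ownership of the skb is transferred
 * and it is freed even on error.
 */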
int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!skb)
		return -ENOMEM;

	if (!hci_devcd_enabled(hdev)) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append);

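/* Append @len bytes of the repeated byte @pattern to the ongoing devcoredump. */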
int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
{
	struct hci_devcoredump_skb_pattern p;
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(p), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	p.pattern = pattern;
	p.len = len;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
	skb_put_data(skb, &p, sizeof(p));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append_pattern);

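/* Signal that dump collection is complete; the devcoredump is emitted from
 * hci_devcd_rx() and the state machine is reset.
 */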
int hci_devcd_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_complete);

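/* Abort an ongoing dump collection; a devcoredump is still emitted with the
 * data collected so far.
 */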
int hci_devcd_abort(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_abort);
