// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if ((dev->enabled_protocols & handler->protocols) ||
				    !handler->protocols)
					handler->decode(dev, ev);
			lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Whether the
 * sample is a pulse or a space is carried in @ev->pulse; a zero duration is
 * nonsensical and will trigger a one-time warning.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		ev->duration, TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
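
/*
 * Usage example (illustrative sketch, not part of this file): a driver
 * whose hardware reports complete mark/space durations could feed each
 * sample to rc-core like this. "my_dev" and the sample values are
 * hypothetical.
 *
 *	struct ir_raw_event ev = {
 *		.pulse		= true,		// a mark
 *		.duration	= 900,		// microseconds
 *	};
 *
 *	ir_raw_event_store(my_dev, &ev);
 */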

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t			now;
	struct ir_raw_event	ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
	/*
	 * The interval that just ended is the opposite of the state that
	 * starts at this edge: an edge to "pulse" closes a space.
	 */
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
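
/*
 * Usage example (hedged sketch): a level-change interrupt handler for a
 * GPIO-based receiver might look like the fragment below; struct my_ir
 * and its members are hypothetical, the pattern simply reports the new
 * line state at each edge, as this function expects.
 *
 *	static irqreturn_t my_ir_irq(int irq, void *data)
 *	{
 *		struct my_ir *ir = data;
 *
 *		ir_raw_event_store_edge(ir->rcdev,
 *					gpiod_get_value(ir->gpio));
 *		return IRQ_HANDLED;
 *	}
 */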

/**
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t		now;
	int		rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/*
	 * The timer may already be armed for the full timeout (125ms by
	 * default); only bring it forward so decoding runs 15ms after the
	 * first edge, never push it further out.
	 */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);
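
/*
 * Usage example (sketch): a driver that measures durations itself but
 * still wants rc-core to schedule decoding and to synthesize the trailing
 * timeout event can call this instead of ir_raw_event_store(); "my_dev"
 * and the sample are hypothetical.
 *
 *	struct ir_raw_event ev = { .pulse = false, .duration = 2250 };
 *
 *	ir_raw_event_store_with_timeout(my_dev, &ev);
 */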

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge(). It is intended for devices
 * with a limited internal buffer: it merges consecutive samples of the same
 * type and handles timeouts. Returns non-zero if the event was added, and
 * zero if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
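
/*
 * Usage example (sketch): hardware with a small sample FIFO often splits
 * one mark or space across several fragments; draining the FIFO through
 * this helper merges the fragments and handles idling. All names below
 * are hypothetical.
 *
 *	while (my_hw_read_sample(hw, &ev))	// hypothetical FIFO drain
 *		ir_raw_event_store_with_filter(my_dev, &ev);
 *	ir_raw_event_handle(my_dev);
 */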

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
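
/*
 * Usage example (sketch): a driver whose hardware raises an "end of
 * reception" interrupt can use the hint to flush the pending merged
 * sample and idle the receiver; the status bit and "my_dev" are
 * hypothetical.
 *
 *	if (status & MY_IRQ_RX_DONE)
 *		ir_raw_event_set_idle(my_dev, true);
 */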

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
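
/*
 * Usage example (sketch): drivers typically batch several samples and
 * then kick decoding once, rather than waking the thread per sample;
 * "samples" and "my_dev" are hypothetical.
 *
 *	for (i = 0; i < count; i++)
 *		ir_raw_event_store(my_dev, &samples[i]);
 *	ir_raw_event_handle(my_dev);
 */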

/* used internally by the sysfs interface */
u64 ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_US(10);	/* headroom beyond the strict minimum */

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
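
/*
 * Usage example (hedged sketch): an encoder for an RC-5-style bi-phase
 * protocol could describe its timing as below and then emit 14 bits; the
 * numbers are illustrative, not a normative protocol definition.
 *
 *	static const struct ir_raw_timings_manchester my_timings = {
 *		.leader_pulse	= 889,
 *		.clock		= 889,
 *		.trailer_space	= 8890,
 *	};
 *
 *	ret = ir_raw_gen_manchester(&e, max, &my_timings, 14, command);
 */

/*
 * ir_raw_gen_pd() below calls a small static helper that is missing from
 * this listing; the following is a minimal reconstruction, consistent with
 * the call sites (emit one pulse then one space, decrementing *max and
 * returning -ENOBUFS once the buffer runs out).
 */
static int ir_raw_gen_pulse_space(struct ir_raw_event **ev,
				  unsigned int *max,
				  unsigned int pulse_width,
				  unsigned int space_width)
{
	if (!*max)
		return -ENOBUFS;
	init_ir_raw_event_duration((*ev)++, 1, pulse_width);
	if (!--*max)
		return -ENOBUFS;
	init_ir_raw_event_duration((*ev)++, 0, space_width);
	--*max;
	return 0;
}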

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
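
/*
 * Usage example (hedged sketch): NEC-style pulse-distance timings, the
 * way an encoder might fill struct ir_raw_timings_pd; the values are the
 * commonly cited NEC numbers in microseconds and serve as illustration
 * only.
 *
 *	static const struct ir_raw_timings_pd my_nec_timings = {
 *		.header_pulse	= 9000,
 *		.header_space	= 4500,
 *		.bit_pulse	= 562,
 *		.bit_space[0]	= 562,
 *		.bit_space[1]	= 1687,
 *		.trailer_pulse	= 562,
 *		.trailer_space	= 9000,
 *		.msb_first	= 0,
 *	};
 *
 *	ret = ir_raw_gen_pd(&e, max, &my_nec_timings, 32, raw_word);
 */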

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
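
/*
 * Usage example (hedged sketch): Sony SIRC is a pulse-length protocol;
 * its timings could be expressed roughly as below (illustrative
 * microsecond values).
 *
 *	static const struct ir_raw_timings_pl my_sirc_timings = {
 *		.header_pulse	= 2400,
 *		.bit_space	= 600,
 *		.bit_pulse[0]	= 600,
 *		.bit_pulse[1]	= 1200,
 *		.trailer_space	= 10000,
 *		.msb_first	= 0,
 *	};
 *
 *	ret = ir_raw_gen_pl(&e, max, &my_sirc_timings, 12, raw_word);
 */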

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
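
/*
 * Usage example (sketch): transmit paths and wakeup-filter code can use
 * this to turn a scancode into raw samples before handing them to the
 * hardware; the buffer size and my_hw_transmit() are hypothetical.
 *
 *	struct ir_raw_event buf[64];
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_PROTO_NEC, 0x40bf, buf,
 *				       ARRAY_SIZE(buf));
 *	if (count >= 0)
 *		my_hw_transmit(buf, count);
 */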

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things.
 * First, rather than calling ir_raw_event_handle() and waking the rc thread
 * for every edge, it calls ir_raw_event_handle() once, 15 ms after the first
 * edge. Second, it generates a timeout event if no more IR is received after
 * the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_us(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_us(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + usecs_to_jiffies(dev->timeout -
						     ktime_to_us(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:		protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns:	The carrier in Hz
 *		-EINVAL if the protocol is invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
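
/*
 * Usage example (sketch): pairing the two encode helpers when programming
 * a transmitter, e.g. for hardware wakeup; my_hw_set_carrier() is
 * hypothetical and error handling is elided.
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_NEC);
 *
 *	if (carrier > 0)
 *		my_hw_set_carrier(carrier);
 */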

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
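
/*
 * Usage example (hedged sketch): a protocol decoder module registers
 * itself from its module_init(); the handler below is a hypothetical
 * skeleton, with my_decode() and my_encode() left to the reader.
 *
 *	static struct ir_raw_handler my_handler = {
 *		.protocols	= RC_PROTO_BIT_NEC,
 *		.decode		= my_decode,
 *		.encode		= my_encode,
 *		.carrier	= 38000,
 *	};
 *
 *	static int __init my_decode_init(void)
 *	{
 *		return ir_raw_handler_register(&my_handler);
 *	}
 */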

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);