Lines matching refs:trig — IIO trigger core (drivers/iio/industrialio-trigger.c)

52 struct iio_trigger *trig = to_iio_trigger(dev);
54 return sysfs_emit(buf, "%s\n", trig->name);
115 int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
119 if (!indio_dev || !trig)
126 indio_dev->trig = iio_trigger_get(trig);
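Note: iio_trigger_set_immutable() pins an IIO device to one trigger and marks the choice read-only, so later writes to current_trigger from user space fail with -EPERM. A minimal sketch of a driver that supplies its own mandatory trigger (the mydrv_* name is hypothetical):

/* Sketch only: register a trigger, then pin the device to it. */
static int mydrv_setup_trigger(struct iio_dev *indio_dev,
			       struct iio_trigger *trig)
{
	int ret;

	ret = iio_trigger_register(trig);
	if (ret)
		return ret;

	/* current_trigger becomes read-only after this call */
	return iio_trigger_set_immutable(indio_dev, trig);
}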
148 struct iio_trigger *trig = NULL, *iter;
153 trig = iter;
154 iio_trigger_get(trig);
159 return trig;
164 struct iio_trigger *trig = container_of(work, struct iio_trigger,
171 trig->ops->reenable(trig);
188 static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
190 if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
191 trig->ops->reenable)
192 schedule_work(&trig->reenable_work);
197 * @trig: trigger which occurred
201 void iio_trigger_poll(struct iio_trigger *trig)
205 if (!atomic_read(&trig->use_count)) {
206 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
209 if (trig->subirqs[i].enabled)
210 generic_handle_irq(trig->subirq_base + i);
212 iio_trigger_notify_done_atomic(trig);
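Note: iio_trigger_poll() is the hard-IRQ fan-out: it claims use_count for all CONFIG_IIO_CONSUMERS_PER_TRIGGER slots, then fires generic_handle_irq() for each enabled sub-IRQ. A minimal sketch of the top half a trigger driver would register with request_irq() (mytrig_* is hypothetical):

/* Sketch only: hard-IRQ top half of a trigger driver. */
static irqreturn_t mytrig_irq_handler(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll(trig);		/* hard-IRQ context only */
	return IRQ_HANDLED;
}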
228 * @trig: trigger which occurred
232 void iio_trigger_poll_nested(struct iio_trigger *trig)
236 if (!atomic_read(&trig->use_count)) {
237 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
240 if (trig->subirqs[i].enabled)
241 handle_nested_irq(trig->subirq_base + i);
243 iio_trigger_notify_done(trig);
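Note: iio_trigger_poll_nested() is the sleeping counterpart for threaded interrupt context; it uses handle_nested_irq() and the non-atomic iio_trigger_notify_done(). The matching sketch, registered as the thread handler of request_threaded_irq() (hypothetical names again):

/* Sketch only: threaded bottom half of a trigger driver. */
static irqreturn_t mytrig_irq_thread(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll_nested(trig);	/* may sleep */
	return IRQ_HANDLED;
}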
249 void iio_trigger_notify_done(struct iio_trigger *trig)
251 if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
252 trig->ops->reenable)
253 trig->ops->reenable(trig);
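Note: iio_trigger_notify_done() is the consumer-side handshake: when the last consumer finishes (use_count reaches zero), the trigger is re-enabled via ops->reenable. It is typically the final call in a consumer's pollfunc bottom half, roughly like this (driver specifics hypothetical):

/* Sketch only: tail of a consumer's pollfunc handler. */
static irqreturn_t mydrv_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* ... read the hardware and iio_push_to_buffers_*() here ... */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}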
258 static int iio_trigger_get_irq(struct iio_trigger *trig)
262 mutex_lock(&trig->pool_lock);
263 ret = bitmap_find_free_region(trig->pool,
266 mutex_unlock(&trig->pool_lock);
268 ret += trig->subirq_base;
273 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
275 mutex_lock(&trig->pool_lock);
276 clear_bit(irq - trig->subirq_base, trig->pool);
277 mutex_unlock(&trig->pool_lock);
287 int iio_trigger_attach_poll_func(struct iio_trigger *trig,
292 bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
299 pf->irq = iio_trigger_get_irq(trig);
302 trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
314 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
315 ret = trig->ops->set_trigger_state(trig, true);
325 if (!iio_validate_own_trigger(pf->indio_dev, trig))
326 trig->attached_own_device = true;
333 iio_trigger_put_irq(trig, pf->irq);
339 int iio_trigger_detach_poll_func(struct iio_trigger *trig,
344 bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
347 if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
348 ret = trig->ops->set_trigger_state(trig, false);
352 if (pf->indio_dev->dev.parent == trig->dev.parent)
353 trig->attached_own_device = false;
354 iio_trigger_put_irq(trig, pf->irq);
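Note: attach/detach bracket a consumer's use of a trigger: ops->set_trigger_state(trig, true) runs only for the first consumer (the pool bitmap was empty) and (trig, false) only for the last (bitmap weight == 1). A trigger driver supplies that callback through its ops; a sketch against a hypothetical regmap-controlled device (mytrig_state and the MYTRIG_* register bits are made up):

#include <linux/regmap.h>
#include <linux/iio/trigger.h>

struct mytrig_state {				/* hypothetical driver state */
	struct regmap *regmap;
};

#define MYTRIG_CTRL_REG		0x00		/* hypothetical register */
#define MYTRIG_CTRL_EN		BIT(0)		/* hypothetical enable bit */

/* Sketch only: gate the hardware event source on and off. */
static int mytrig_set_trigger_state(struct iio_trigger *trig, bool state)
{
	struct mytrig_state *st = iio_trigger_get_drvdata(trig);

	return regmap_update_bits(st->regmap, MYTRIG_CTRL_REG, MYTRIG_CTRL_EN,
				  state ? MYTRIG_CTRL_EN : 0);
}

static const struct iio_trigger_ops mytrig_ops = {
	.set_trigger_state = mytrig_set_trigger_state,
};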
425 if (indio_dev->trig)
426 return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
450 struct iio_trigger *oldtrig = indio_dev->trig;
451 struct iio_trigger *trig;
465 trig = iio_trigger_acquire_by_name(buf);
466 if (oldtrig == trig) {
471 if (trig && indio_dev->info->validate_trigger) {
472 ret = indio_dev->info->validate_trigger(indio_dev, trig);
477 if (trig && trig->ops && trig->ops->validate_device) {
478 ret = trig->ops->validate_device(trig, indio_dev);
483 indio_dev->trig = trig;
491 if (indio_dev->trig) {
493 iio_trigger_attach_poll_func(indio_dev->trig,
500 if (trig)
501 iio_trigger_put(trig);
519 struct iio_trigger *trig = to_iio_trigger(device);
522 if (trig->subirq_base) {
524 irq_modify_status(trig->subirq_base + i,
527 irq_set_chip(trig->subirq_base + i,
529 irq_set_handler(trig->subirq_base + i,
533 irq_free_descs(trig->subirq_base,
536 kfree(trig->name);
537 kfree(trig);
548 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
550 trig->subirqs[d->irq - trig->subirq_base].enabled = false;
556 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);
558 trig->subirqs[d->irq - trig->subirq_base].enabled = true;
567 struct iio_trigger *trig;
570 trig = kzalloc(sizeof(*trig), GFP_KERNEL);
571 if (!trig)
574 trig->dev.parent = parent;
575 trig->dev.type = &iio_trig_type;
576 trig->dev.bus = &iio_bus_type;
577 device_initialize(&trig->dev);
578 INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);
580 mutex_init(&trig->pool_lock);
581 trig->subirq_base = irq_alloc_descs(-1, 0,
584 if (trig->subirq_base < 0)
587 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
588 if (trig->name == NULL)
591 INIT_LIST_HEAD(&trig->list);
593 trig->owner = this_mod;
595 trig->subirq_chip.name = trig->name;
596 trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
597 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
599 irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
600 irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
601 irq_modify_status(trig->subirq_base + i,
605 return trig;
608 irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
610 kfree(trig);
629 struct iio_trigger *trig;
633 trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
636 return trig;
640 void iio_trigger_free(struct iio_trigger *trig)
642 if (trig)
643 put_device(&trig->dev);
671 struct iio_trigger **ptr, *trig;
681 trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
683 if (trig) {
684 *ptr = trig;
690 return trig;
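Note: pulling the allocation pieces together: viio_trigger_alloc() does the real work (device init, sub-IRQ descriptors, kvasprintf'd name), iio_trigger_alloc() and devm_iio_trigger_alloc() wrap it, and iio_trigger_free() only drops the device reference so that iio_trig_release() (lines 519-537 above) frees the IRQ descriptors and the name. A typical device-managed probe sequence, sketched with hypothetical names:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

/* Sketch only: devm allocation + registration of a device's own trigger. */
static int mydrv_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;

	trig->ops = &mytrig_ops;		/* see earlier sketch */
	iio_trigger_set_drvdata(trig, indio_dev);

	return devm_iio_trigger_register(dev, trig);
}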
727 return indio_dev->trig->attached_own_device;
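Note: iio_trigger_using_own() (line 727) tells a consumer whether the attached trigger belongs to its own device, as recorded in attached_own_device by the attach path. A driver might branch on it in its capture path; mydrv_read_latched()/mydrv_read_oneshot() below are hypothetical helpers, not defined here:

/* Sketch only: choose a capture path based on trigger ownership. */
static int mydrv_read_sample(struct iio_dev *indio_dev, int *val)
{
	if (iio_trigger_using_own(indio_dev))
		return mydrv_read_latched(indio_dev, val); /* data already latched */

	return mydrv_read_oneshot(indio_dev, val);	/* force a conversion */
}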
735 * @trig: the IIO trigger to check
743 int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
745 if (idev->dev.parent != trig->dev.parent)
754 * @trig: The IIO trigger to check
763 int iio_trigger_validate_own_device(struct iio_trigger *trig,
766 if (indio_dev->dev.parent != trig->dev.parent)
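Note: the two validate helpers (lines 743-766) are mirror images of the same parent-device comparison, packaged for the two hook points the current_trigger store path (lines 471-478) consults: validate_trigger in struct iio_info on the device side, and validate_device in struct iio_trigger_ops on the trigger side. Wiring up both restricts pairing to the device's own trigger:

/* Sketch only: accept pairing only between own device and own trigger. */
static const struct iio_info mydrv_info = {
	.validate_trigger = iio_validate_own_trigger,
};

static const struct iio_trigger_ops mytrig_own_ops = {
	.set_trigger_state = mytrig_set_trigger_state,	/* see earlier sketch */
	.validate_device = iio_trigger_validate_own_device,
};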
781 if (indio_dev->trig)
782 iio_trigger_put(indio_dev->trig);