Lines matching refs:desc (cross-reference listing of kernel/irq/irqdesc.c)

54 static int alloc_masks(struct irq_desc *desc, int node)
56 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
61 if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
63 free_cpumask_var(desc->irq_common_data.affinity);
69 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
71 free_cpumask_var(desc->irq_common_data.effective_affinity);
73 free_cpumask_var(desc->irq_common_data.affinity);
80 static void desc_smp_init(struct irq_desc *desc, int node,
85 cpumask_copy(desc->irq_common_data.affinity, affinity);
88 cpumask_clear(desc->pending_mask);
91 desc->irq_common_data.node = node;
95 static void free_masks(struct irq_desc *desc)
98 free_cpumask_var(desc->pending_mask);
100 free_cpumask_var(desc->irq_common_data.affinity);
102 free_cpumask_var(desc->irq_common_data.effective_affinity);
108 alloc_masks(struct irq_desc *desc, int node) { return 0; }
110 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
111 static inline void free_masks(struct irq_desc *desc) { }
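/*
 * The helpers above manage the cpumasks embedded in an irq descriptor:
 * alloc_masks() allocates the affinity mask and, depending on the kernel
 * configuration, the effective-affinity and pending masks, unwinding the
 * earlier allocations on failure; desc_smp_init() seeds them from the
 * caller-supplied affinity and records the NUMA node; free_masks() releases
 * them again. The one-line stubs are the !CONFIG_SMP variants.
 */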
114 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
119 desc->irq_common_data.handler_data = NULL;
120 desc->irq_common_data.msi_desc = NULL;
122 desc->irq_data.common = &desc->irq_common_data;
123 desc->irq_data.irq = irq;
124 desc->irq_data.chip = &no_irq_chip;
125 desc->irq_data.chip_data = NULL;
126 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
127 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
128 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
129 desc->handle_irq = handle_bad_irq;
130 desc->depth = 1;
131 desc->irq_count = 0;
132 desc->irqs_unhandled = 0;
133 desc->tot_count = 0;
134 desc->name = NULL;
135 desc->owner = owner;
137 *per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
138 desc_smp_init(desc, node, affinity);
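/*
 * desc_set_defaults() above returns a descriptor to its pristine state:
 * no chip (no_irq_chip), no handler or MSI data, handle_bad_irq() as the
 * flow handler, the IRQD_IRQ_DISABLED and IRQD_IRQ_MASKED state bits set,
 * depth 1 (logically disabled), all counters and the per-CPU statistics
 * cleared, and the SMP fields reinitialized.
 */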
163 struct irq_desc *desc;
166 desc = mt_find(&sparse_irqs, &index, nr_irqs);
168 return desc ? irq_desc_get_irq(desc) : nr_irqs;
171 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
174 WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
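/*
 * With CONFIG_SPARSE_IRQ the descriptors live in a maple tree keyed by irq
 * number: the mt_find() lookup above returns the next allocated descriptor
 * at or after 'index' (or NULL, mapped to nr_irqs), and irq_insert_desc()
 * stores a new descriptor, warning if the store fails.
 */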
187 static int init_desc(struct irq_desc *desc, int irq, int node,
192 desc->kstat_irqs = alloc_percpu(struct irqstat);
193 if (!desc->kstat_irqs)
196 if (alloc_masks(desc, node)) {
197 free_percpu(desc->kstat_irqs);
201 raw_spin_lock_init(&desc->lock);
202 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
203 mutex_init(&desc->request_mutex);
204 init_waitqueue_head(&desc->wait_for_threads);
205 desc_set_defaults(irq, desc, node, affinity, owner);
206 irqd_set(&desc->irq_data, flags);
207 irq_resend_init(desc);
209 kobject_init(&desc->kobj, &irq_kobj_type);
210 init_rcu_head(&desc->rcu);
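/*
 * init_desc() above is the common initialization path for a freshly
 * allocated descriptor: per-CPU statistics, cpumasks (freed again if the
 * mask allocation fails), the raw spinlock with its lockdep class, the
 * request mutex and the waitqueue used by threaded handlers, the default
 * state from desc_set_defaults(), any caller-supplied irqd flags, the
 * resend list node, and finally the kobject and RCU head used for teardown.
 */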
229 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
235 unsigned int c = irq_desc_kstat_cpu(desc, cpu);
249 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
252 raw_spin_lock_irq(&desc->lock);
253 if (desc->irq_data.chip && desc->irq_data.chip->name) {
255 desc->irq_data.chip->name);
257 raw_spin_unlock_irq(&desc->lock);
266 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
269 raw_spin_lock_irq(&desc->lock);
270 if (desc->irq_data.domain)
271 ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
272 raw_spin_unlock_irq(&desc->lock);
281 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
284 raw_spin_lock_irq(&desc->lock);
286 irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
287 raw_spin_unlock_irq(&desc->lock);
297 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
300 raw_spin_lock_irq(&desc->lock);
302 irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
303 raw_spin_unlock_irq(&desc->lock);
313 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
316 raw_spin_lock_irq(&desc->lock);
317 if (desc->name)
318 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
319 raw_spin_unlock_irq(&desc->lock);
328 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
333 raw_spin_lock_irq(&desc->lock);
334 for_each_action_of_desc(desc, action) {
339 raw_spin_unlock_irq(&desc->lock);
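/*
 * The container_of() blocks above are the sysfs show methods behind
 * /sys/kernel/irq/<irq>/: per_cpu_count, chip_name, hwirq, type
 * ("level"/"edge"), wakeup, name and actions, most of them sampling
 * descriptor state under desc->lock.
 */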
366 static void irq_sysfs_add(int irq, struct irq_desc *desc)
374 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
377 desc->istate |= IRQS_SYSFS;
381 static void irq_sysfs_del(struct irq_desc *desc)
389 if (desc->istate & IRQS_SYSFS)
390 kobject_del(&desc->kobj);
395 struct irq_desc *desc;
408 for_each_irq_desc(irq, desc)
409 irq_sysfs_add(irq, desc);
422 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
423 static void irq_sysfs_del(struct irq_desc *desc) {}
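/*
 * irq_sysfs_add()/irq_sysfs_del() above publish and remove the per-irq
 * kobject; the IRQS_SYSFS flag remembers whether kobject_add() succeeded so
 * teardown only deletes what was actually registered, and descriptors
 * allocated before the sysfs base exists are added later from the initcall
 * loop shown. The empty bodies are the !CONFIG_SYSFS stubs.
 */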
449 struct irq_desc *desc;
452 desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
453 if (!desc)
456 ret = init_desc(desc, irq, node, flags, affinity, owner);
458 kfree(desc);
462 return desc;
467 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
469 free_masks(desc);
470 free_percpu(desc->kstat_irqs);
471 kfree(desc);
476 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
478 kobject_put(&desc->kobj);
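/*
 * alloc_desc() above is the allocation path (kzalloc_node() plus
 * init_desc(), with the allocation freed if initialization fails), while
 * irq_kobj_release() and delayed_free_desc() form the teardown path: the
 * RCU callback drops the last kobject reference, and the kobject release
 * frees the masks, the per-CPU statistics and the descriptor itself.
 */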
483 struct irq_desc *desc = irq_to_desc(irq);
485 irq_remove_debugfs_entry(desc);
486 unregister_irq_proc(irq, desc);
497 irq_sysfs_del(desc);
506 call_rcu(&desc->rcu, delayed_free_desc);
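/*
 * free_desc() above first unpublishes the descriptor (debugfs, /proc and
 * sysfs) and then defers the actual free to an RCU grace period via
 * call_rcu(), so lookups done under RCU (e.g. by demultiplexing handlers)
 * never see a half-torn-down descriptor.
 */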
513 struct irq_desc *desc;
538 desc = alloc_desc(start + i, node, flags, mask, owner);
539 if (!desc)
541 irq_insert_desc(start + i, desc);
542 irq_sysfs_add(start + i, desc);
543 irq_add_debugfs_entry(start + i, desc);
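/*
 * The CONFIG_SPARSE_IRQ alloc_descs() loop above creates one descriptor per
 * requested interrupt and publishes each one through the maple tree, sysfs
 * and debugfs before the range is handed back to the caller.
 */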
564 struct irq_desc *desc;
583 desc = alloc_desc(i, node, 0, NULL, NULL);
584 irq_insert_desc(i, desc);
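/*
 * early_irq_init() above pre-allocates and inserts descriptors at boot for
 * the interrupts the architecture asks for up front, before any driver can
 * request them.
 */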
635 struct irq_desc *desc = irq_to_desc(irq);
638 raw_spin_lock_irqsave(&desc->lock, flags);
639 desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
640 raw_spin_unlock_irqrestore(&desc->lock, flags);
651 struct irq_desc *desc = irq_to_desc(start + i);
653 desc->owner = owner;
654 irq_insert_desc(start + i, desc);
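/*
 * The two fragments above are the !CONFIG_SPARSE_IRQ counterparts: freeing
 * a descriptor just resets the statically allocated entry to its defaults
 * under desc->lock, and allocation records the owner and (re)inserts the
 * static descriptor into the lookup tree.
 */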
680 int handle_irq_desc(struct irq_desc *desc)
684 if (!desc)
687 data = irq_desc_get_irq_data(desc);
691 generic_handle_irq_desc(desc);
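/*
 * handle_irq_desc() above validates the descriptor and the calling context
 * and then invokes the installed flow handler via generic_handle_irq_desc().
 *
 * Hedged sketch (not part of irqdesc.c): how a demultiplexing irqchip
 * driver typically reaches this path -- it resolves each pending hardware
 * irq through its irqdomain with generic_handle_domain_irq(), which looks
 * up the descriptor and calls handle_irq_desc(). The names my_chip_demux,
 * my_demux_domain and my_read_pending() are illustrative placeholders, not
 * kernel symbols.
 */
static struct irq_domain *my_demux_domain;

static unsigned long my_read_pending(void)
{
        return 0;       /* placeholder for reading the device's pending register */
}

static void my_chip_demux(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending = my_read_pending();
        unsigned int hwirq;

        chained_irq_enter(chip, desc);
        for_each_set_bit(hwirq, &pending, 32)
                generic_handle_domain_irq(my_demux_domain, hwirq);
        chained_irq_exit(chip, desc);
}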
884 struct irq_desc *desc = irq_to_desc(irq);
886 if (desc) {
889 !irq_settings_is_per_cpu_devid(desc))
893 irq_settings_is_per_cpu_devid(desc))
898 chip_bus_lock(desc);
899 raw_spin_lock_irqsave(&desc->lock, *flags);
901 return desc;
904 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
905 __releases(&desc->lock)
907 raw_spin_unlock_irqrestore(&desc->lock, flags);
909 chip_bus_sync_unlock(desc);
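/*
 * __irq_get_desc_lock() above looks the descriptor up, rejects mismatches
 * between the caller's per-CPU-devid expectation and the descriptor's
 * settings, and returns it with the optional chip bus lock and desc->lock
 * held; __irq_put_desc_unlock() undoes both.
 *
 * Hedged sketch of the usual calling pattern inside kernel/irq (through the
 * irq_get_desc_buslock()/irq_put_desc_busunlock() wrappers from
 * kernel/irq/internals.h); my_poke_irq() is an illustrative name.
 */
static int my_poke_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        /* ... inspect or modify descriptor state, protected by desc->lock ... */
        irq_put_desc_busunlock(desc, flags);
        return 0;
}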
915 struct irq_desc *desc = irq_to_desc(irq);
917 if (!desc || desc->percpu_enabled)
920 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
922 if (!desc->percpu_enabled)
925 desc->percpu_affinity = affinity ? : cpu_possible_mask;
938 struct irq_desc *desc = irq_to_desc(irq);
940 if (!desc || !desc->percpu_enabled)
944 cpumask_copy(affinity, desc->percpu_affinity);
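/*
 * irq_set_percpu_devid_partition() above marks an interrupt as per-CPU with
 * a per-CPU device id: it allocates the percpu_enabled mask and records the
 * partition affinity (defaulting to cpu_possible_mask); the _get_ variant
 * copies that affinity back to the caller.
 *
 * Hedged sketch of how a per-CPU interrupt user (an arch timer, for
 * instance) typically pairs this with request_percpu_irq() and per-CPU
 * enabling; my_setup_percpu_irq, my_handler and my_percpu_cookie are
 * illustrative placeholders.
 */
static DEFINE_PER_CPU(int, my_percpu_cookie);

static irqreturn_t my_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int my_setup_percpu_irq(unsigned int irq)
{
        int err;

        err = irq_set_percpu_devid(irq);        /* wraps irq_set_percpu_devid_partition(irq, NULL) */
        if (err)
                return err;

        err = request_percpu_irq(irq, my_handler, "my-percpu-irq", &my_percpu_cookie);
        if (err)
                return err;

        /* Later, on each CPU (typically from a hotplug online callback): */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        return 0;
}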
966 struct irq_desc *desc = irq_to_desc(irq);
968 return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
971 unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
976 if (!irq_settings_is_per_cpu_devid(desc) &&
977 !irq_settings_is_per_cpu(desc) &&
978 !irq_is_nmi(desc))
979 return data_race(desc->tot_count);
982 sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
988 struct irq_desc *desc = irq_to_desc(irq);
990 if (!desc || !desc->kstat_irqs)
992 return kstat_irqs_desc(desc, cpu_possible_mask);
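/*
 * The statistics helpers above: kstat_irqs_cpu() returns one CPU's count,
 * kstat_irqs_desc() takes the tot_count fast path for ordinary interrupts
 * and sums the per-CPU counters only for per-CPU and NMI ones, and
 * kstat_irqs() sums over all possible CPUs.
 *
 * Hedged sketch of a /proc/interrupts-style consumer (cf. show_interrupts()
 * in kernel/irq/proc.c); total_irq_count() is an illustrative name, not a
 * kernel function.
 */
static unsigned int total_irq_count(unsigned int irq)
{
        unsigned int cpu, sum = 0;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);        /* 0 if the descriptor or its counters are absent */

        return sum;
}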
999 struct irq_desc *desc;
1002 for_each_irq_desc(irq, desc) {
1003 if (!desc->kstat_irqs)
1005 this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt));
1011 struct irq_desc *desc = irq_to_desc(irq);
1013 if (!desc || !desc->kstat_irqs)
1015 return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref);
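/*
 * The snapshot pair above copies each descriptor's current per-CPU count
 * into a reference slot so the companion helper can later report how many
 * interrupts arrived on the local CPU since the snapshot was taken.
 */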
1044 struct irq_desc *desc = irq_to_desc(irq);
1046 if (desc) {
1047 lockdep_set_class(&desc->lock, lock_class);
1048 lockdep_set_class(&desc->request_mutex, request_class);
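/*
 * irq_set_lockdep_class() above lets irqchip code give a descriptor's
 * spinlock and request mutex dedicated lockdep classes, avoiding false
 * positive deadlock reports when interrupts are nested or chained.
 */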