/* Source: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/tile/kernel/ */
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * This data structure tracks the rectangle data, etc., associated
 * one-to-one with a "struct file *" from opening HARDWALL_FILE.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;             /* "rectangles" list */
	struct list_head task_head;        /* head of tasks in this hardwall */
	int ulhc_x;                        /* upper left hand corner x coord */
	int ulhc_y;                        /* upper left hand corner y coord */
	int width;                         /* rectangle width */
	int height;                        /* rectangle height */
	int teardown_in_progress;          /* are we tearing this one down? */
};

/* Currently allocated hardwall rectangles */
static LIST_HEAD(rectangles);

/*
 * Guard changes to the hardwall data structures.
 * This could be finer grained (e.g. one lock for the list of hardwall
 * rectangles, then separate embedded locks for each one's list of tasks),
 * but there are subtle correctness issues when trying to start with
 * a task's "hardwall" pointer and lock the correct rectangle's embedded
 * lock in the presence of a simultaneous deactivation, so it seems
 * easier to have a single lock, given that none of these data
 * structures are touched very frequently during normal operation.
 */
static DEFINE_SPINLOCK(hardwall_lock);

/* Allow disabling UDN access. */
static int udn_disabled;
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	udn_disabled = 1;
	return 0;
}
early_param("noudn", noudn);


/*
 * Low-level primitives
 */

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu))          \
		cpumask_set_cpu(cpu, dst);    \
} while (0)


/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
	return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
		(y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

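/*
 * Cpus are numbered row-major across the grid (cpu == y * smp_width + x),
 * so the lowest-numbered cpu in a rectangular mask is its upper left
 * corner and the highest-numbered cpu is its lower right corner.
 */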
/* Compute the rectangle parameters and validate the cpumask. */
static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/* Confirm that the cpumask is exactly the rectangle. */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this UDN
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}

/* Do the two given rectangles overlap on any cpu? */
static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
{
	return a->ulhc_x + a->width > b->ulhc_x &&    /* A not to the left */
		b->ulhc_x + b->width > a->ulhc_x &&   /* B not to the left */
		a->ulhc_y + a->height > b->ulhc_y &&  /* A not above */
		b->ulhc_y + b->height > a->ulhc_y;    /* B not above */
}


/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3)
};

static void enable_firewall_interrupts(void)
{
	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
}

static void disable_firewall_interrupts(void)
{
	raw_local_irq_mask_now(INT_UDN_FIREWALL);
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_ipi_func(void *info)
{
	struct hardwall_info *r = info;
	int cpu = smp_processor_id();
	int x = cpu % smp_width;
	int y = cpu / smp_width;
	int bits = 0;
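	/*
	 * Only cpus on the edge of the rectangle receive this IPI,
	 * so at least one of the four edge tests below must match;
	 * the BUG_ON below enforces that invariant.
	 */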
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
	if (x == r->ulhc_x + r->width - 1)
		bits |= E_PROTECT;
	if (y == r->ulhc_y)
		bits |= N_PROTECT;
	if (y == r->ulhc_y + r->height - 1)
		bits |= S_PROTECT;
	BUG_ON(bits == 0);
	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
	enable_firewall_interrupts();
}

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_setup(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
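	/* The loop above left "cpu" one past the top right corner; back up to the ULHC. */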
	cpu -= r->width;
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
}

void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
	struct hardwall_info *rect;
	struct task_struct *p;
	struct siginfo info;
	int x, y;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;

	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

	/* This tile trapped a network access; find the rectangle. */
	x = cpu % smp_width;
	y = cpu / smp_width;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_for_each_entry(rect, &rectangles, list) {
		if (contains(rect, x, y))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the UDN is drained.
	 */
	BUG_ON(&rect->list == &rectangles);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected hardwall violation %#lx"
		       " while teardown already in progress\n",
		       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the UDN
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected hardwall violation %#lx...\n",
	       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
		BUG_ON(p->thread.hardwall != rect);
		if (p->sighand) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			spin_lock(&p->sighand->siglock);
			__group_send_sig_info(info.si_signo, &info, p);
			spin_unlock(&p->sighand->siglock);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock_irqrestore(&hardwall_lock, flags);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts();

	irq_exit();
	set_irq_regs(old_regs);
}

/* Allow access from user space to the UDN. */
void grant_network_mpls(void)
{
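	/*
	 * Writing 1 to an MPL "SET_0" register lowers that interrupt's
	 * minimum protection level to PL0, which is what makes the
	 * corresponding UDN resource usable from user space.
	 */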
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
#endif
}

/* Deny access from user space to the UDN. */
void restrict_network_mpls(void)
{
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
#endif
}


/*
 * Code to create, activate, deactivate, and destroy hardwall rectangles.
 */

/* Create a hardwall for the given rectangle */
static struct hardwall_info *hardwall_create(
	size_t size, const unsigned char __user *bits)
{
	struct hardwall_info *iter, *rect;
	struct cpumask mask;
	unsigned long flags;
	int rc;

	/* Reject crazy sizes out of hand, a la sys_mbind(). */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* Copy whatever fits into a cpumask. */
	if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
		return ERR_PTR(-EFAULT);

	/*
	 * If the size was short, clear the rest of the mask;
	 * otherwise validate that the rest of the user mask was zero
	 * (we don't try hard to be efficient when validating huge masks).
	 */
	if (size < sizeof(struct cpumask)) {
		memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
	} else if (size > sizeof(struct cpumask)) {
		size_t i;
		for (i = sizeof(struct cpumask); i < size; ++i) {
			char c;
			if (get_user(c, &bits[i]))
				return ERR_PTR(-EFAULT);
			if (c)
				return ERR_PTR(-EINVAL);
		}
	}

	/* Allocate a new rectangle optimistically. */
	rect = kmalloc(sizeof(struct hardwall_info),
			GFP_KERNEL | __GFP_ZERO);
	if (rect == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&rect->task_head);

	/* Compute the rectangle size and validate that it's plausible. */
	rc = setup_rectangle(rect, &mask);
	if (rc != 0) {
		kfree(rect);
		return ERR_PTR(rc);
	}

	/* Confirm it doesn't overlap and add it to the list. */
	spin_lock_irqsave(&hardwall_lock, flags);
	list_for_each_entry(iter, &rectangles, list) {
		if (overlaps(iter, rect)) {
			spin_unlock_irqrestore(&hardwall_lock, flags);
			kfree(rect);
			return ERR_PTR(-EBUSY);
		}
	}
	list_add_tail(&rect->list, &rectangles);
	spin_unlock_irqrestore(&hardwall_lock, flags);

	/* Set up appropriate hardwalling on all affected cpus. */
	hardwall_setup(rect);

	return rect;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *rect)
{
	int cpu, x, y;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;

	/* Require a rectangle. */
	if (rect == NULL)
		return -ENODATA;

	/* Not allowed to activate a rectangle that is being torn down. */
	if (rect->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu in this rectangle. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	x = cpu_x(cpu);
	y = cpu_y(cpu);
	if (!contains(rect, x, y))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	if (ts->hardwall) {
		BUG_ON(ts->hardwall != rect);
		return 0;
	}

	/* Success!  This process gets to use the user networks on this cpu. */
	ts->hardwall = rect;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_add(&ts->hardwall_list, &rect->task_head);
	spin_unlock_irqrestore(&hardwall_lock, flags);
	grant_network_mpls();
	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
	       p->pid, p->comm, cpu);
	return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold hardwall_lock.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing networks with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall == NULL);
	ts->hardwall = NULL;
	list_del(&ts->hardwall_list);
	if (task == current)
		restrict_network_mpls();
}

/* Deactivate a task's hardwall. */
int hardwall_deactivate(struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hardwall_lock, flags);
	activated = (task->thread.hardwall != NULL);
	if (activated)
		_hardwall_deactivate(task);
	spin_unlock_irqrestore(&hardwall_lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
	       task->pid, task->comm, smp_processor_id());
	return 0;
}

/* Stop a UDN switch before draining the network. */
static void stop_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#endif
}

/* Drain all the state from a stopped switch. */
static void drain_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
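	/* The word count lives in the two-bit field at bit 10 of DEMUX_STATUS. */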
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes. */
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}

/* Reset random UDN state registers at boot up and during hardwall teardown. */
void reset_network_state(void)
{
#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	unsigned int cpu = smp_processor_id();
	unsigned int x = cpu % smp_width;
	unsigned int y = cpu / smp_width;
#endif

	if (udn_disabled)
		return;

#if !CHIP_HAS_REV1_XDN()
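	/* The TILE_COORD SPR packs the x coordinate at bit 18 and y at bit 7. */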
	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
#endif

	/* Clear out other random registers so we have a clean slate. */
	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
#endif

	/* Start the switch and demux. */
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

/* Restart a UDN switch after draining. */
static void restart_udn_switch(void *ignored)
{
	reset_network_state();

	/* Disable firewall interrupts. */
	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
	disable_firewall_interrupts();
}

/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
static void fill_mask(struct hardwall_info *r, struct cpumask *result)
{
	int x, y, cpu;

	cpumask_clear(result);

	cpu = r->ulhc_y * smp_width + r->ulhc_x;
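	/* The inner loop advances "cpu" by width, so the row stride is smp_width - width. */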
	for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
		for (x = 0; x < r->width; ++x, ++cpu)
			cpu_online_set(cpu, result);
	}
}

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *rect)
{
	struct task_struct *task;
	unsigned long flags;
	struct cpumask mask;

	/* Make sure this file actually represents a rectangle. */
	if (rect == NULL)
		return;

	/*
	 * Deactivate any remaining tasks.  It's possible to race with
	 * some other thread that is exiting and hasn't yet called
	 * deactivate (when freeing its thread_info), so we carefully
	 * deactivate any remaining tasks before freeing the
	 * hardwall_info object itself.
	 */
	spin_lock_irqsave(&hardwall_lock, flags);
	list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
		_hardwall_deactivate(task);
	spin_unlock_irqrestore(&hardwall_lock, flags);

	/* Drain the UDN. */
	printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
	       rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
	fill_mask(rect, &mask);
	on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
	on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);

	/* Restart switch and disable firewall. */
	on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);

	/* Now free the rectangle from the list. */
	spin_lock_irqsave(&hardwall_lock, flags);
	BUG_ON(!list_empty(&rect->task_head));
	list_del(&rect->list);
	spin_unlock_irqrestore(&hardwall_lock, flags);
	kfree(rect);
}


/*
 * Dump hardwall state via /proc; initialized in arch/tile/sys/proc.c.
 */
int proc_tile_hardwall_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *r;

	if (udn_disabled) {
		seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height);
		return 0;
	}

	spin_lock_irq(&hardwall_lock);
	list_for_each_entry(r, &rectangles, list) {
		struct task_struct *p;
		seq_printf(sf, "%dx%d %d,%d pids:",
			   r->width, r->height, r->ulhc_x, r->ulhc_y);
		list_for_each_entry(p, &r->task_head, thread.hardwall_list) {
			unsigned int cpu = cpumask_first(&p->cpus_allowed);
			unsigned int x = cpu % smp_width;
			unsigned int y = cpu / smp_width;
			seq_printf(sf, " %d@%d,%d", p->pid, x, y);
		}
		seq_printf(sf, "\n");
	}
	spin_unlock_irq(&hardwall_lock);
	return 0;
}


/*
 * Character device support via ioctl/close.
 */

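/*
 * For orientation, a rough sketch of the expected user-space sequence
 * (the ioctl macro names are assumed to follow <asm/hardwall.h>, and the
 * device node path is likewise an assumption, since user space names it):
 *
 *   int fd = open("/dev/hardwall", O_RDWR);
 *   ioctl(fd, HARDWALL_CREATE(size), bits);  // define the rectangle
 *   ioctl(fd, HARDWALL_ACTIVATE);            // bind this task to it
 *   ...use the UDN from user space...
 *   close(fd);  // flush deactivates; the last release destroys it
 */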
static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *rect = file->private_data;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (udn_disabled)
			return -ENOSYS;
		if (rect != NULL)
			return -EALREADY;
		rect = hardwall_create(_IOC_SIZE(a),
					(const unsigned char __user *)b);
		if (IS_ERR(rect))
			return PTR_ERR(rect);
		file->private_data = rect;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(rect);

	case _HARDWALL_DEACTIVATE:
		if (current->thread.hardwall != rect)
			return -EINVAL;
		return hardwall_deactivate(current);

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *rect = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (rect) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * UDN until they are context-switched out and back in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 */
		spin_lock_irqsave(&hardwall_lock, flags);
		list_for_each_entry_safe(task, tmp, &rect->task_head,
					 thread.hardwall_list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(task);
		}
		spin_unlock_irqrestore(&hardwall_lock, flags);
	}

	return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}

static const struct file_operations dev_hardwall_fops = {
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hardwall_compat_ioctl,
#endif
	.flush          = hardwall_flush,
	.release        = hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
	int rc;
	dev_t dev;

	rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
	if (rc < 0)
		return rc;
	cdev_init(&hardwall_dev, &dev_hardwall_fops);
	rc = cdev_add(&hardwall_dev, dev, 1);
	if (rc < 0) {
		/* Don't leak the reserved region if cdev_add() fails. */
		unregister_chrdev_region(dev, 1);
		return rc;
	}

	return 0;
}
late_initcall(dev_hardwall_init);