1/*
2 * Copyright (C) 1999-2002 Hewlett-Packard Co
3 *	David Mosberger-Tang <davidm@hpl.hp.com>
4 */
5/*
6 * This file implements call frame unwind support for the Linux
7 * kernel.  Parsing and processing the unwind information is
8 * time-consuming, so this implementation translates the unwind
9 * descriptors into unwind scripts.  These scripts are very simple
10 * (basically a sequence of assignments) and efficient to execute.
 * They are cached for later re-use.  Each script is specific to a
12 * given instruction pointer address and the set of predicate values
13 * that the script depends on (most unwind descriptors are
14 * unconditional and scripts often do not depend on predicates at
15 * all).  This code is based on the unwind conventions described in
16 * the "IA-64 Software Conventions and Runtime Architecture" manual.
17 *
18 * SMP conventions:
19 *	o updates to the global unwind data (in structure "unw") are serialized
20 *	  by the unw.lock spinlock
21 *	o each unwind script has its own read-write lock; a thread must acquire
22 *	  a read lock before executing a script and must acquire a write lock
23 *	  before modifying a script
24 *	o if both the unw.lock spinlock and a script's read-write lock must be
25 *	  acquired, then the read-write lock must be acquired first.
26 */
27#include <linux/bootmem.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31
32#include <asm/unwind.h>
33
34#include <asm/delay.h>
35#include <asm/page.h>
36#include <asm/ptrace.h>
37#include <asm/ptrace_offsets.h>
38#include <asm/rse.h>
39#include <asm/system.h>
40#include <asm/uaccess.h>
41
42#include "entry.h"
43#include "unwind_i.h"
44
45#define MIN(a,b)	((a) < (b) ? (a) : (b))
46#define p5		5
47
48#define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
49#define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
50
51#define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
52#define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
53
54#define UNW_DEBUG	0
#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
56
57#if UNW_DEBUG
58  static long unw_debug_level = 255;
59# define debug(level,format...)	if (unw_debug_level > level) printk(format)
60# define dprintk(format...)	printk(format)
61# define inline
62#else
63# define debug(level,format...)
64# define dprintk(format...)
65#endif
66
67#if UNW_STATS
68# define STAT(x...)	x
69#else
70# define STAT(x...)
71#endif
72
73#define alloc_reg_state()	kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
74#define free_reg_state(usr)	kfree(usr)
75#define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
76#define free_labeled_state(usr)	kfree(usr)
77
78typedef unsigned long unw_word;
79typedef unsigned char unw_hash_index_t;
80
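/*
 * struct_offset(str, fld) yields the byte offset of member FLD within
 * structure type STR; it is equivalent to the standard offsetof() macro.
 */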
81#define struct_offset(str,fld)	((char *)&((str *)NULL)->fld - (char *) 0)
82
83static struct {
84	spinlock_t lock;			/* spinlock for unwind data */
85
86	/* list of unwind tables (one per load-module) */
87	struct unw_table *tables;
88
89	/* table of registers that prologues can save (and order in which they're saved): */
90	const unsigned char save_order[8];
91
92	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
93	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
94
	unsigned short lru_head;		/* index of least-recently used script */
96	unsigned short lru_tail;		/* index of most-recently used script */
97
98	/* index into unw_frame_info for preserved register i */
99	unsigned short preg_index[UNW_NUM_REGS];
100
101	/* unwind table for the kernel: */
102	struct unw_table kernel_table;
103
104	/* unwind table describing the gate page (kernel code that is mapped into user space): */
105	size_t gate_table_size;
106	unsigned long *gate_table;
107
108	/* hash table that maps instruction pointer to script index: */
109	unsigned short hash[UNW_HASH_SIZE];
110
111	/* script cache: */
112	struct unw_script cache[UNW_CACHE_SIZE];
113
114# if UNW_DEBUG
115	const char *preg_name[UNW_NUM_REGS];
116# endif
117# if UNW_STATS
118	struct {
119		struct {
120			int lookups;
121			int hinted_hits;
122			int normal_hits;
123			int collision_chain_traversals;
124		} cache;
125		struct {
126			unsigned long build_time;
127			unsigned long run_time;
128			unsigned long parse_time;
129			int builds;
130			int news;
131			int collisions;
132			int runs;
133		} script;
134		struct {
135			unsigned long init_time;
136			unsigned long unwind_time;
137			int inits;
138			int unwinds;
139		} api;
140	} stat;
141# endif
142} unw = {
143	tables: &unw.kernel_table,
144	lock: SPIN_LOCK_UNLOCKED,
145	save_order: {
146		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
147		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
148	},
149	preg_index: {
150		struct_offset(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
151		struct_offset(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
152		struct_offset(struct unw_frame_info, bsp_loc)/8,
153		struct_offset(struct unw_frame_info, bspstore_loc)/8,
154		struct_offset(struct unw_frame_info, pfs_loc)/8,
155		struct_offset(struct unw_frame_info, rnat_loc)/8,
156		struct_offset(struct unw_frame_info, psp)/8,
157		struct_offset(struct unw_frame_info, rp_loc)/8,
158		struct_offset(struct unw_frame_info, r4)/8,
159		struct_offset(struct unw_frame_info, r5)/8,
160		struct_offset(struct unw_frame_info, r6)/8,
161		struct_offset(struct unw_frame_info, r7)/8,
162		struct_offset(struct unw_frame_info, unat_loc)/8,
163		struct_offset(struct unw_frame_info, pr_loc)/8,
164		struct_offset(struct unw_frame_info, lc_loc)/8,
165		struct_offset(struct unw_frame_info, fpsr_loc)/8,
166		struct_offset(struct unw_frame_info, b1_loc)/8,
167		struct_offset(struct unw_frame_info, b2_loc)/8,
168		struct_offset(struct unw_frame_info, b3_loc)/8,
169		struct_offset(struct unw_frame_info, b4_loc)/8,
170		struct_offset(struct unw_frame_info, b5_loc)/8,
171		struct_offset(struct unw_frame_info, f2_loc)/8,
172		struct_offset(struct unw_frame_info, f3_loc)/8,
173		struct_offset(struct unw_frame_info, f4_loc)/8,
174		struct_offset(struct unw_frame_info, f5_loc)/8,
175		struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
176		struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
177		struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
178		struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
179		struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
180		struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
181		struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
182		struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
183		struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
184		struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
185		struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
186		struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
187		struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
188		struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
189		struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
190		struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
191	},
	hash: { [0 ... UNW_HASH_SIZE - 1] = -1 },
193#if UNW_DEBUG
194	preg_name: {
195		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
196		"r4", "r5", "r6", "r7",
197		"ar.unat", "pr", "ar.lc", "ar.fpsr",
198		"b1", "b2", "b3", "b4", "b5",
199		"f2", "f3", "f4", "f5",
200		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
201		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
202	}
203#endif
204};
205
206
207/* Unwind accessors.  */
208
209/*
210 * Returns offset of rREG in struct pt_regs.
211 */
212static inline unsigned long
213pt_regs_off (unsigned long reg)
214{
	unsigned long off = 0;
216
217	if (reg >= 1 && reg <= 3)
218		off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
219	else if (reg <= 11)
220		off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
221	else if (reg <= 15)
222		off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
223	else if (reg <= 31)
224		off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
225	else
226		dprintk("unwind: bad scratch reg r%lu\n", reg);
227	return off;
228}
229
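/*
 * Read (WRITE == 0) or write (WRITE != 0) general register REGNUM of the
 * frame described by INFO, along with its NaT bit.  Scratch registers are
 * accessed through the frame's pt_regs, preserved registers through their
 * save locations (or switch_stack), and stacked registers through the
 * register backing store.  Returns 0 on success, -1 on failure.
 */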
230int
231unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
232{
233	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
234	struct unw_ireg *ireg;
235	struct pt_regs *pt;
236
237	if ((unsigned) regnum - 1 >= 127) {
238		dprintk("unwind: trying to access non-existent r%u\n", regnum);
239		return -1;
240	}
241
242	if (regnum < 32) {
243		if (regnum >= 4 && regnum <= 7) {
244			/* access a preserved register */
245			ireg = &info->r4 + (regnum - 4);
246			addr = ireg->loc;
247			if (addr) {
248				nat_addr = addr + ireg->nat.off;
249				switch (ireg->nat.type) {
250				      case UNW_NAT_VAL:
251					/* simulate getf.sig/setf.sig */
252					if (write) {
253						if (*nat) {
254							/* write NaTVal and be done with it */
255							addr[0] = 0;
256							addr[1] = 0x1fffe;
257							return 0;
258						}
259						addr[1] = 0x1003e;
260					} else {
						if (addr[0] == 0 && addr[1] == 0x1fffe) {
262							/* return NaT and be done with it */
263							*val = 0;
264							*nat = 1;
265							return 0;
266						}
267					}
268					/* fall through */
269				      case UNW_NAT_NONE:
270					dummy_nat = 0;
271					nat_addr = &dummy_nat;
272					break;
273
274				      case UNW_NAT_MEMSTK:
275					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
276					break;
277
278				      case UNW_NAT_REGSTK:
279					nat_addr = ia64_rse_rnat_addr(addr);
280					if ((unsigned long) addr < info->regstk.limit
281					    || (unsigned long) addr >= info->regstk.top)
282					{
283						dprintk("unwind: %p outside of regstk "
284							"[0x%lx-0x%lx)\n", (void *) addr,
285							info->regstk.limit,
286							info->regstk.top);
287						return -1;
288					}
289					if ((unsigned long) nat_addr >= info->regstk.top)
290						nat_addr = &info->sw->ar_rnat;
291					nat_mask = (1UL << ia64_rse_slot_num(addr));
292					break;
293				}
294			} else {
295				addr = &info->sw->r4 + (regnum - 4);
296				nat_addr = &info->sw->ar_unat;
297				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
298			}
299		} else {
300			/* access a scratch register */
301			if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
302				pt = (struct pt_regs *) info->psp - 1;
303			else
304				pt = (struct pt_regs *) info->sp - 1;
305			addr = (unsigned long *) ((long) pt + pt_regs_off(regnum));
306			if (info->pri_unat_loc)
307				nat_addr = info->pri_unat_loc;
308			else
309				nat_addr = &info->sw->ar_unat;
310			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
311		}
312	} else {
313		/* access a stacked register */
314		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
315		nat_addr = ia64_rse_rnat_addr(addr);
316		if ((unsigned long) addr < info->regstk.limit
317		    || (unsigned long) addr >= info->regstk.top)
318		{
319			dprintk("unwind: ignoring attempt to access register outside of rbs\n");
320			return -1;
321		}
322		if ((unsigned long) nat_addr >= info->regstk.top)
323			nat_addr = &info->sw->ar_rnat;
324		nat_mask = (1UL << ia64_rse_slot_num(addr));
325	}
326
327	if (write) {
328		*addr = *val;
329		if (*nat)
330			*nat_addr |= nat_mask;
331		else
332			*nat_addr &= ~nat_mask;
333	} else {
334		if ((*nat_addr & nat_mask) == 0) {
335			*val = *addr;
336			*nat = 0;
337		} else {
338			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
339			*nat = 1;
340		}
341	}
342	return 0;
343}
344
345int
346unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
347{
348	unsigned long *addr;
349	struct pt_regs *pt;
350
351	if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
352		pt = (struct pt_regs *) info->psp - 1;
353	else
354		pt = (struct pt_regs *) info->sp - 1;
355	switch (regnum) {
356		/* scratch: */
357	      case 0: addr = &pt->b0; break;
358	      case 6: addr = &pt->b6; break;
359	      case 7: addr = &pt->b7; break;
360
361		/* preserved: */
362	      case 1: case 2: case 3: case 4: case 5:
363		addr = *(&info->b1_loc + (regnum - 1));
364		if (!addr)
365			addr = &info->sw->b1 + (regnum - 1);
366		break;
367
368	      default:
369		dprintk("unwind: trying to access non-existent b%u\n", regnum);
370		return -1;
371	}
372	if (write)
373		*addr = *val;
374	else
375		*val = *addr;
376	return 0;
377}
378
379int
380unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
381{
382	struct ia64_fpreg *addr = 0;
383	struct pt_regs *pt;
384
385	if ((unsigned) (regnum - 2) >= 126) {
386		dprintk("unwind: trying to access non-existent f%u\n", regnum);
387		return -1;
388	}
389
390	if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
391		pt = (struct pt_regs *) info->psp - 1;
392	else
393		pt = (struct pt_regs *) info->sp - 1;
394
395	if (regnum <= 5) {
396		addr = *(&info->f2_loc + (regnum - 2));
397		if (!addr)
398			addr = &info->sw->f2 + (regnum - 2);
399	} else if (regnum <= 15) {
400		if (regnum <= 9)
401			addr = &pt->f6  + (regnum - 6);
402		else
403			addr = &info->sw->f10 + (regnum - 10);
404	} else if (regnum <= 31) {
405		addr = info->fr_loc[regnum - 16];
406		if (!addr)
407			addr = &info->sw->f16 + (regnum - 16);
408	} else {
409		struct task_struct *t = info->task;
410
411		if (write)
412			ia64_sync_fph(t);
413		else
414			ia64_flush_fph(t);
415		addr = t->thread.fph + (regnum - 32);
416	}
417
418	if (write)
419		*addr = *val;
420	else
421		*val = *addr;
422	return 0;
423}
424
425int
426unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
427{
428	unsigned long *addr;
429	struct pt_regs *pt;
430
431	if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
432		pt = (struct pt_regs *) info->psp - 1;
433	else
434		pt = (struct pt_regs *) info->sp - 1;
435
436	switch (regnum) {
437	      case UNW_AR_BSP:
438		addr = info->bsp_loc;
439		if (!addr)
440			addr = &info->sw->ar_bspstore;
441		break;
442
443	      case UNW_AR_BSPSTORE:
444		addr = info->bspstore_loc;
445		if (!addr)
446			addr = &info->sw->ar_bspstore;
447		break;
448
449	      case UNW_AR_PFS:
450		addr = info->pfs_loc;
451		if (!addr)
452			addr = &info->sw->ar_pfs;
453		break;
454
455	      case UNW_AR_RNAT:
456		addr = info->rnat_loc;
457		if (!addr)
458			addr = &info->sw->ar_rnat;
459		break;
460
461	      case UNW_AR_UNAT:
462		addr = info->unat_loc;
463		if (!addr)
464			addr = &info->sw->ar_unat;
465		break;
466
467	      case UNW_AR_LC:
468		addr = info->lc_loc;
469		if (!addr)
470			addr = &info->sw->ar_lc;
471		break;
472
473	      case UNW_AR_EC:
474		if (!info->cfm_loc)
475			return -1;
476		if (write)
477			*info->cfm_loc =
478				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
479		else
480			*val = (*info->cfm_loc >> 52) & 0x3f;
481		return 0;
482
483	      case UNW_AR_FPSR:
484		addr = info->fpsr_loc;
485		if (!addr)
486			addr = &info->sw->ar_fpsr;
487		break;
488
489	      case UNW_AR_RSC:
490		addr = &pt->ar_rsc;
491		break;
492
493	      case UNW_AR_CCV:
494		addr = &pt->ar_ccv;
495		break;
496
497	      default:
498		dprintk("unwind: trying to access non-existent ar%u\n", regnum);
499		return -1;
500	}
501
502	if (write)
503		*addr = *val;
504	else
505		*val = *addr;
506	return 0;
507}
508
509int
510unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
511{
512	unsigned long *addr;
513
514	addr = info->pr_loc;
515	if (!addr)
516		addr = &info->sw->pr;
517
518	if (write)
519		*addr = *val;
520	else
521		*val = *addr;
522	return 0;
523}
524
525
526/* Routines to manipulate the state stack.  */
527
528static inline void
529push (struct unw_state_record *sr)
530{
531	struct unw_reg_state *rs;
532
533	rs = alloc_reg_state();
534	if (!rs) {
535		printk("unwind: cannot stack reg state!\n");
536		return;
537	}
538	memcpy(rs, &sr->curr, sizeof(*rs));
539	sr->curr.next = rs;
540}
541
542static void
543pop (struct unw_state_record *sr)
544{
545	struct unw_reg_state *rs = sr->curr.next;
546
547	if (!rs) {
548		printk("unwind: stack underflow!\n");
549		return;
550	}
551	memcpy(&sr->curr, rs, sizeof(*rs));
552	free_reg_state(rs);
553}
554
555/* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
556static struct unw_reg_state *
557dup_state_stack (struct unw_reg_state *rs)
558{
559	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
560
561	while (rs) {
562		copy = alloc_reg_state();
563		if (!copy) {
564			printk ("unwind.dup_state_stack: out of memory\n");
565			return NULL;
566		}
567		memcpy(copy, rs, sizeof(*copy));
568		if (first)
569			prev->next = copy;
570		else
571			first = copy;
572		rs = rs->next;
573		prev = copy;
574	}
575	return first;
576}
577
578/* Free all stacked register states (but not RS itself).  */
579static void
580free_state_stack (struct unw_reg_state *rs)
581{
582	struct unw_reg_state *p, *next;
583
584	for (p = rs->next; p != NULL; p = next) {
585		next = p->next;
586		free_reg_state(p);
587	}
588	rs->next = NULL;
589}
590
591/* Unwind decoder routines */
592
593static enum unw_register_index __attribute__((const))
594decode_abreg (unsigned char abreg, int memory)
595{
596	switch (abreg) {
597	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
598	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
599	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
600	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
601	      case 0x60: return UNW_REG_PR;
602	      case 0x61: return UNW_REG_PSP;
603	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
604	      case 0x63: return UNW_REG_RP;
605	      case 0x64: return UNW_REG_BSP;
606	      case 0x65: return UNW_REG_BSPSTORE;
607	      case 0x66: return UNW_REG_RNAT;
608	      case 0x67: return UNW_REG_UNAT;
609	      case 0x68: return UNW_REG_FPSR;
610	      case 0x69: return UNW_REG_PFS;
611	      case 0x6a: return UNW_REG_LC;
612	      default:
613		break;
614	}
615	dprintk("unwind: bad abreg=0x%x\n", abreg);
616	return UNW_REG_LC;
617}
618
619static void
620set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
621{
622	reg->val = val;
623	reg->where = where;
624	if (reg->when == UNW_WHEN_NEVER)
625		reg->when = when;
626}
627
628static void
629alloc_spill_area (unsigned long *offp, unsigned long regsize,
630		  struct unw_reg_info *lo, struct unw_reg_info *hi)
631{
632	struct unw_reg_info *reg;
633
634	for (reg = hi; reg >= lo; --reg) {
635		if (reg->where == UNW_WHERE_SPILL_HOME) {
636			reg->where = UNW_WHERE_PSPREL;
637			*offp -= regsize;
638			reg->val = *offp;
639		}
640	}
641}
642
643static inline void
644spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
645{
646	struct unw_reg_info *reg;
647
648	for (reg = *regp; reg <= lim; ++reg) {
649		if (reg->where == UNW_WHERE_SPILL_HOME) {
650			reg->when = t;
651			*regp = reg + 1;
652			return;
653		}
654	}
655	dprintk("unwind: excess spill!\n");
656}
657
658static inline void
659finish_prologue (struct unw_state_record *sr)
660{
661	struct unw_reg_info *reg;
662	unsigned long off;
663	int i;
664
665	/*
666	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
667	 * for Using Unwind Descriptors", rule 3):
668	 */
669	for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) {
670		reg = sr->curr.reg + unw.save_order[i];
671		if (reg->where == UNW_WHERE_GR_SAVE) {
672			reg->where = UNW_WHERE_GR;
673			reg->val = sr->gr_save_loc++;
674		}
675	}
676
677	/*
678	 * Next, compute when the fp, general, and branch registers get
679	 * saved.  This must come before alloc_spill_area() because
680	 * we need to know which registers are spilled to their home
681	 * locations.
682	 */
683	if (sr->imask) {
684		unsigned char kind, mask = 0, *cp = sr->imask;
685		unsigned long t;
686		static const unsigned char limit[3] = {
687			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
688		};
689		struct unw_reg_info *(regs[3]);
690
691		regs[0] = sr->curr.reg + UNW_REG_F2;
692		regs[1] = sr->curr.reg + UNW_REG_R4;
693		regs[2] = sr->curr.reg + UNW_REG_B1;
694
695		for (t = 0; t < sr->region_len; ++t) {
696			if ((t & 3) == 0)
697				mask = *cp++;
698			kind = (mask >> 2*(3-(t & 3))) & 3;
699			if (kind > 0)
700				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
701						sr->region_start + t);
702		}
703	}
704	/*
705	 * Next, lay out the memory stack spill area:
706	 */
707	if (sr->any_spills) {
708		off = sr->spill_offset;
709		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
710		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
711		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
712	}
713}
714
715/*
716 * Region header descriptors.
717 */
718
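/*
 * Process a region-header descriptor: complete the previous prologue region
 * (if any), pop state pushed for a pending epilogue, and start a new region
 * of RLEN instruction slots.  For a prologue region (BODY == 0), MASK and
 * GRSAVE describe the registers that are saved to general registers.
 */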
719static void
720desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
721	       struct unw_state_record *sr)
722{
723	int i;
724
725	if (!(sr->in_body || sr->first_region))
726		finish_prologue(sr);
727	sr->first_region = 0;
728
729	/* check if we're done: */
730	if (sr->when_target < sr->region_start + sr->region_len) {
731		sr->done = 1;
732		return;
733	}
734
735	for (i = 0; i < sr->epilogue_count; ++i)
736		pop(sr);
737	sr->epilogue_count = 0;
738	sr->epilogue_start = UNW_WHEN_NEVER;
739
740	if (!body)
741		push(sr);
742
743	sr->region_start += sr->region_len;
744	sr->region_len = rlen;
745	sr->in_body = body;
746
747	if (!body) {
748		for (i = 0; i < 4; ++i) {
749			if (mask & 0x8)
750				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
751					sr->region_start + sr->region_len - 1, grsave++);
752			mask <<= 1;
753		}
754		sr->gr_save_loc = grsave;
755		sr->any_spills = 0;
756		sr->imask = 0;
757		sr->spill_offset = 0x10;	/* default to psp+16 */
758	}
759}
760
761/*
762 * Prologue descriptors.
763 */
764
765static inline void
766desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
767{
768	if (abi == 0 && context == 'i')
769		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
770	else
771		dprintk("unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context);
772}
773
774static inline void
775desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
776{
777	int i;
778
779	for (i = 0; i < 5; ++i) {
780		if (brmask & 1)
781			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
782				sr->region_start + sr->region_len - 1, gr++);
783		brmask >>= 1;
784	}
785}
786
787static inline void
788desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
789{
790	int i;
791
792	for (i = 0; i < 5; ++i) {
793		if (brmask & 1) {
794			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
795				sr->region_start + sr->region_len - 1, 0);
796			sr->any_spills = 1;
797		}
798		brmask >>= 1;
799	}
800}
801
802static inline void
803desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
804{
805	int i;
806
807	for (i = 0; i < 4; ++i) {
808		if ((grmask & 1) != 0) {
809			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
810				sr->region_start + sr->region_len - 1, 0);
811			sr->any_spills = 1;
812		}
813		grmask >>= 1;
814	}
815	for (i = 0; i < 20; ++i) {
816		if ((frmask & 1) != 0) {
817			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
818			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
819				sr->region_start + sr->region_len - 1, 0);
820			sr->any_spills = 1;
821		}
822		frmask >>= 1;
823	}
824}
825
826static inline void
827desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
828{
829	int i;
830
831	for (i = 0; i < 4; ++i) {
832		if ((frmask & 1) != 0) {
833			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
834				sr->region_start + sr->region_len - 1, 0);
835			sr->any_spills = 1;
836		}
837		frmask >>= 1;
838	}
839}
840
841static inline void
842desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
843{
844	int i;
845
846	for (i = 0; i < 4; ++i) {
847		if ((grmask & 1) != 0)
848			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
849				sr->region_start + sr->region_len - 1, gr++);
850		grmask >>= 1;
851	}
852}
853
854static inline void
855desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
856{
857	int i;
858
859	for (i = 0; i < 4; ++i) {
860		if ((grmask & 1) != 0) {
861			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
862				sr->region_start + sr->region_len - 1, 0);
863			sr->any_spills = 1;
864		}
865		grmask >>= 1;
866	}
867}
868
869static inline void
870desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
871{
872	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
873		sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
874}
875
876static inline void
877desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
878{
879	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
880}
881
882static inline void
883desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
884{
885	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
886}
887
888static inline void
889desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
890{
891	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
892		0x10 - 4*pspoff);
893}
894
895static inline void
896desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
897{
898	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
899		4*spoff);
900}
901
902static inline void
903desc_rp_br (unsigned char dst, struct unw_state_record *sr)
904{
905	sr->return_link_reg = dst;
906}
907
908static inline void
909desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
910{
911	struct unw_reg_info *reg = sr->curr.reg + regnum;
912
913	if (reg->where == UNW_WHERE_NONE)
914		reg->where = UNW_WHERE_GR_SAVE;
915	reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
916}
917
918static inline void
919desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
920{
921	sr->spill_offset = 0x10 - 4*pspoff;
922}
923
924static inline unsigned char *
925desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
926{
927	sr->imask = imaskp;
928	return imaskp + (2*sr->region_len + 7)/8;
929}
930
931/*
932 * Body descriptors.
933 */
934static inline void
935desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
936{
937	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
938	sr->epilogue_count = ecount + 1;
939}
940
941static inline void
942desc_copy_state (unw_word label, struct unw_state_record *sr)
943{
944	struct unw_labeled_state *ls;
945
946	for (ls = sr->labeled_states; ls; ls = ls->next) {
947		if (ls->label == label) {
948			free_state_stack(&sr->curr);
949			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
950			sr->curr.next = dup_state_stack(ls->saved_state.next);
951			return;
952		}
953	}
954	printk("unwind: failed to find state labeled 0x%lx\n", label);
955}
956
957static inline void
958desc_label_state (unw_word label, struct unw_state_record *sr)
959{
960	struct unw_labeled_state *ls;
961
962	ls = alloc_labeled_state();
963	if (!ls) {
964		printk("unwind.desc_label_state(): out of memory\n");
965		return;
966	}
967	ls->label = label;
968	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
969	ls->saved_state.next = dup_state_stack(sr->curr.next);
970
971	/* insert into list of labeled states: */
972	ls->next = sr->labeled_states;
973	sr->labeled_states = ls;
974}
975
976/*
977 * General descriptors.
978 */
979
980static inline int
981desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
982{
983	if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
984		return 0;
985	if (qp > 0) {
986		if ((sr->pr_val & (1UL << qp)) == 0)
987			return 0;
988		sr->pr_mask |= (1UL << qp);
989	}
990	return 1;
991}
992
993static inline void
994desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
995{
996	struct unw_reg_info *r;
997
998	if (!desc_is_active(qp, t, sr))
999		return;
1000
1001	r = sr->curr.reg + decode_abreg(abreg, 0);
1002	r->where = UNW_WHERE_NONE;
1003	r->when = UNW_WHEN_NEVER;
1004	r->val = 0;
1005}
1006
1007static inline void
1008desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1009		     unsigned char ytreg, struct unw_state_record *sr)
1010{
1011	enum unw_where where = UNW_WHERE_GR;
1012	struct unw_reg_info *r;
1013
1014	if (!desc_is_active(qp, t, sr))
1015		return;
1016
1017	if (x)
1018		where = UNW_WHERE_BR;
1019	else if (ytreg & 0x80)
1020		where = UNW_WHERE_FR;
1021
1022	r = sr->curr.reg + decode_abreg(abreg, 0);
1023	r->where = where;
1024	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1025	r->val = (ytreg & 0x7f);
1026}
1027
1028static inline void
1029desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1030		     struct unw_state_record *sr)
1031{
1032	struct unw_reg_info *r;
1033
1034	if (!desc_is_active(qp, t, sr))
1035		return;
1036
1037	r = sr->curr.reg + decode_abreg(abreg, 1);
1038	r->where = UNW_WHERE_PSPREL;
1039	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1040	r->val = 0x10 - 4*pspoff;
1041}
1042
1043static inline void
1044desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1045		       struct unw_state_record *sr)
1046{
1047	struct unw_reg_info *r;
1048
1049	if (!desc_is_active(qp, t, sr))
1050		return;
1051
1052	r = sr->curr.reg + decode_abreg(abreg, 1);
1053	r->where = UNW_WHERE_SPREL;
1054	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1055	r->val = 4*spoff;
1056}
1057
1058#define UNW_DEC_BAD_CODE(code)			printk("unwind: unknown code 0x%02x\n", code);
1059
1060/*
1061 * region headers:
1062 */
1063#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
1064#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
1065/*
1066 * prologue descriptors:
1067 */
1068#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
1069#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
1070#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
1071#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
1072#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
1073#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
1074#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
1075#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
1076#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
1077#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
1078#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
1079#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
1080#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
1081#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1082#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1083#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1084#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1085#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1086#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
1087#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
1088#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
1089/*
1090 * body descriptors:
1091 */
1092#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
1093#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
1094#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
1095/*
1096 * general unwind descriptors:
1097 */
1098#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
1099#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
1100#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
1101#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
1102#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
1103#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
1104#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
1105#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
1106
1107#include "unwind_decoder.c"
1108
1109
1110/* Unwind scripts. */
1111
1112static inline unw_hash_index_t
1113hash (unsigned long ip)
1114{
#	define magic	0x9e3779b97f4a7c16	/* based on ((sqrt(5)-1)/2)*2^64 */
1116
1117	return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
1118}
1119
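/*
 * Check whether SCRIPT matches instruction pointer IP and predicate values
 * PR.  On a match, the script's read lock is kept held for the caller;
 * otherwise the lock is dropped again.
 */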
1120static inline long
1121cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1122{
1123	read_lock(&script->lock);
1124	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1125		/* keep the read lock... */
1126		return 1;
1127	read_unlock(&script->lock);
1128	return 0;
1129}
1130
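/*
 * Look up the unwind script for INFO->ip in the script cache: first via the
 * hint recorded by the previously used script, then via the hash table.  On
 * a hit, the script is returned with its read lock held; otherwise NULL is
 * returned.
 */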
1131static inline struct unw_script *
1132script_lookup (struct unw_frame_info *info)
1133{
1134	struct unw_script *script = unw.cache + info->hint;
1135	unsigned short index;
1136	unsigned long ip, pr;
1137
1138	STAT(++unw.stat.cache.lookups);
1139
1140	ip = info->ip;
1141	pr = info->pr;
1142
1143	if (cache_match(script, ip, pr)) {
1144		STAT(++unw.stat.cache.hinted_hits);
1145		return script;
1146	}
1147
1148	index = unw.hash[hash(ip)];
1149	if (index >= UNW_CACHE_SIZE)
1150		return 0;
1151
1152	script = unw.cache + index;
1153	while (1) {
1154		if (cache_match(script, ip, pr)) {
1155			/* update hint; no locking required as single-word writes are atomic */
1156			STAT(++unw.stat.cache.normal_hits);
1157			unw.cache[info->prev_script].hint = script - unw.cache;
1158			return script;
1159		}
1160		if (script->coll_chain >= UNW_HASH_SIZE)
1161			return 0;
1162		script = unw.cache + script->coll_chain;
1163		STAT(++unw.stat.cache.collision_chain_traversals);
1164	}
1165}
1166
/*
 * On return, the write lock of the returned script is still held.
 */
1170static inline struct unw_script *
1171script_new (unsigned long ip)
1172{
1173	struct unw_script *script, *prev, *tmp;
1174	unw_hash_index_t index;
1175	unsigned long flags;
1176	unsigned short head;
1177
1178	STAT(++unw.stat.script.news);
1179
1180	/*
	 * Can't (easily) use cmpxchg() here because of the ABA problem
	 * that is intrinsic to cmpxchg()...
1183	 */
1184	spin_lock_irqsave(&unw.lock, flags);
1185	{
1186		head = unw.lru_head;
1187		script = unw.cache + head;
1188		unw.lru_head = script->lru_chain;
1189	}
1190	spin_unlock(&unw.lock);
1191
1192	write_lock(&script->lock);
1193
1194	spin_lock(&unw.lock);
1195	{
1196		/* re-insert script at the tail of the LRU chain: */
1197		unw.cache[unw.lru_tail].lru_chain = head;
1198		unw.lru_tail = head;
1199
1200		/* remove the old script from the hash table (if it's there): */
1201		if (script->ip) {
1202			index = hash(script->ip);
1203			tmp = unw.cache + unw.hash[index];
1204			prev = 0;
1205			while (1) {
1206				if (tmp == script) {
1207					if (prev)
1208						prev->coll_chain = tmp->coll_chain;
1209					else
1210						unw.hash[index] = tmp->coll_chain;
1211					break;
1212				} else
1213					prev = tmp;
				if (tmp->coll_chain >= UNW_CACHE_SIZE)
					/* old script wasn't in the hash-table */
					break;
1217				tmp = unw.cache + tmp->coll_chain;
1218			}
1219		}
1220
1221		/* enter new script in the hash table */
1222		index = hash(ip);
1223		script->coll_chain = unw.hash[index];
1224		unw.hash[index] = script - unw.cache;
1225
1226		script->ip = ip;	/* set new IP while we're holding the locks */
1227
1228		STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1229	}
1230	spin_unlock_irqrestore(&unw.lock, flags);
1231
1232	script->flags = 0;
1233	script->hint = 0;
1234	script->count = 0;
1235	return script;
1236}
1237
1238static void
1239script_finalize (struct unw_script *script, struct unw_state_record *sr)
1240{
1241	script->pr_mask = sr->pr_mask;
1242	script->pr_val = sr->pr_val;
1243	/*
1244	 * We could down-grade our write-lock on script->lock here but
1245	 * the rwlock API doesn't offer atomic lock downgrading, so
1246	 * we'll just keep the write-lock and release it later when
1247	 * we're done using the script.
1248	 */
1249}
1250
1251static inline void
1252script_emit (struct unw_script *script, struct unw_insn insn)
1253{
1254	if (script->count >= UNW_MAX_SCRIPT_LEN) {
1255		dprintk("unwind: script exceeds maximum size of %u instructions!\n",
1256			UNW_MAX_SCRIPT_LEN);
1257		return;
1258	}
1259	script->insn[script->count++] = insn;
1260}
1261
1262static inline void
1263emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1264{
1265	struct unw_reg_info *r = sr->curr.reg + i;
1266	enum unw_insn_opcode opc;
1267	struct unw_insn insn;
1268	unsigned long val = 0;
1269
1270	switch (r->where) {
1271	      case UNW_WHERE_GR:
1272		if (r->val >= 32) {
1273			/* register got spilled to a stacked register */
1274			opc = UNW_INSN_SETNAT_TYPE;
1275			val = UNW_NAT_REGSTK;
1276		} else
1277			/* register got spilled to a scratch register */
1278			opc = UNW_INSN_SETNAT_MEMSTK;
1279		break;
1280
1281	      case UNW_WHERE_FR:
1282		opc = UNW_INSN_SETNAT_TYPE;
1283		val = UNW_NAT_VAL;
1284		break;
1285
1286	      case UNW_WHERE_BR:
1287		opc = UNW_INSN_SETNAT_TYPE;
1288		val = UNW_NAT_NONE;
1289		break;
1290
1291	      case UNW_WHERE_PSPREL:
1292	      case UNW_WHERE_SPREL:
1293		opc = UNW_INSN_SETNAT_MEMSTK;
1294		break;
1295
1296	      default:
1297		dprintk("unwind: don't know how to emit nat info for where = %u\n", r->where);
1298		return;
1299	}
1300	insn.opc = opc;
1301	insn.dst = unw.preg_index[i];
1302	insn.val = val;
1303	script_emit(script, insn);
1304}
1305
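/*
 * Translate the state-record entry for register I into script instructions
 * that compute where the register's value can be found; for r4-r7 the
 * matching NaT information is emitted as well.
 */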
1306static void
1307compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1308{
1309	struct unw_reg_info *r = sr->curr.reg + i;
1310	enum unw_insn_opcode opc;
1311	unsigned long val, rval;
1312	struct unw_insn insn;
1313	long need_nat_info;
1314
1315	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1316		return;
1317
1318	opc = UNW_INSN_MOVE;
1319	val = rval = r->val;
1320	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1321
1322	switch (r->where) {
1323	      case UNW_WHERE_GR:
1324		if (rval >= 32) {
1325			opc = UNW_INSN_MOVE_STACKED;
1326			val = rval - 32;
1327		} else if (rval >= 4 && rval <= 7) {
1328			if (need_nat_info) {
1329				opc = UNW_INSN_MOVE2;
1330				need_nat_info = 0;
1331			}
1332			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1333		} else {
1334			opc = UNW_INSN_ADD_SP;
1335			val = -sizeof(struct pt_regs) + pt_regs_off(rval);
1336		}
1337		break;
1338
1339	      case UNW_WHERE_FR:
1340		if (rval <= 5)
			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
1342		else if (rval >= 16 && rval <= 31)
1343			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1344		else {
1345			opc = UNW_INSN_ADD_SP;
1346			val = -sizeof(struct pt_regs);
1347			if (rval <= 9)
1348				val += struct_offset(struct pt_regs, f6) + 16*(rval - 6);
1349			else
1350				dprintk("unwind: kernel may not touch f%lu\n", rval);
1351		}
1352		break;
1353
1354	      case UNW_WHERE_BR:
1355		if (rval >= 1 && rval <= 5)
1356			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1357		else {
1358			opc = UNW_INSN_ADD_SP;
1359			val = -sizeof(struct pt_regs);
1360			if (rval == 0)
1361				val += struct_offset(struct pt_regs, b0);
1362			else if (rval == 6)
1363				val += struct_offset(struct pt_regs, b6);
1364			else
1365				val += struct_offset(struct pt_regs, b7);
1366		}
1367		break;
1368
1369	      case UNW_WHERE_SPREL:
1370		opc = UNW_INSN_ADD_SP;
1371		break;
1372
1373	      case UNW_WHERE_PSPREL:
1374		opc = UNW_INSN_ADD_PSP;
1375		break;
1376
1377	      default:
1378		dprintk("unwind: register %u has unexpected `where' value of %u\n", i, r->where);
1379		break;
1380	}
1381	insn.opc = opc;
1382	insn.dst = unw.preg_index[i];
1383	insn.val = val;
1384	script_emit(script, insn);
1385	if (need_nat_info)
1386		emit_nat_info(sr, i, script);
1387
1388	if (i == UNW_REG_PSP) {
1389		/*
1390		 * info->psp must contain the _value_ of the previous
		 * sp, not its save location.  We get this by
1392		 * dereferencing the value we just stored in
1393		 * info->psp:
1394		 */
1395		insn.opc = UNW_INSN_LOAD;
1396		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1397		script_emit(script, insn);
1398	}
1399}
1400
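/*
 * Binary-search TABLE for the entry whose code range covers REL_IP (an
 * instruction pointer relative to the table's segment base).  Returns NULL
 * if no entry covers REL_IP.
 */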
1401static inline const struct unw_table_entry *
1402lookup (struct unw_table *table, unsigned long rel_ip)
1403{
1404	const struct unw_table_entry *e = 0;
1405	unsigned long lo, hi, mid;
1406
1407	/* do a binary search for right entry: */
1408	for (lo = 0, hi = table->length; lo < hi; ) {
1409		mid = (lo + hi) / 2;
1410		e = &table->array[mid];
1411		if (rel_ip < e->start_offset)
1412			hi = mid;
1413		else if (rel_ip >= e->end_offset)
1414			lo = mid + 1;
1415		else
1416			break;
1417	}
1418	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1419		return NULL;
1420	return e;
1421}
1422
1423/*
1424 * Build an unwind script that unwinds from state OLD_STATE to the
1425 * entrypoint of the function that called OLD_STATE.
1426 */
1427static inline struct unw_script *
1428build_script (struct unw_frame_info *info)
1429{
1430	const struct unw_table_entry *e = 0;
1431	struct unw_script *script = 0;
1432	struct unw_labeled_state *ls, *next;
1433	unsigned long ip = info->ip;
1434	struct unw_state_record sr;
1435	struct unw_table *table;
1436	struct unw_reg_info *r;
1437	struct unw_insn insn;
1438	u8 *dp, *desc_end;
1439	u64 hdr;
1440	int i;
1441	STAT(unsigned long start, parse_start;)
1442
1443	STAT(++unw.stat.script.builds; start = ia64_get_itc());
1444
1445	/* build state record */
1446	memset(&sr, 0, sizeof(sr));
1447	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1448		r->when = UNW_WHEN_NEVER;
1449	sr.pr_val = info->pr;
1450
1451	script = script_new(ip);
1452	if (!script) {
1453		dprintk("unwind: failed to create unwind script\n");
1454		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1455		return 0;
1456	}
1457	unw.cache[info->prev_script].hint = script - unw.cache;
1458
	/* search the kernel's and the modules' unwind tables for IP: */
1460
1461	STAT(parse_start = ia64_get_itc());
1462
1463	for (table = unw.tables; table; table = table->next) {
1464		if (ip >= table->start && ip < table->end) {
1465			e = lookup(table, ip - table->segment_base);
1466			break;
1467		}
1468	}
1469	if (!e) {
1470		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
1471		dprintk("unwind: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", ip,
1472			unw.cache[info->prev_script].ip);
1473		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1474		sr.curr.reg[UNW_REG_RP].when = -1;
1475		sr.curr.reg[UNW_REG_RP].val = 0;
1476		compile_reg(&sr, UNW_REG_RP, script);
1477		script_finalize(script, &sr);
1478		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1479		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1480		return script;
1481	}
1482
1483	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1484			  + (ip & 0xfUL));
1485	hdr = *(u64 *) (table->segment_base + e->info_offset);
1486	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
1487	desc_end = dp + 8*UNW_LENGTH(hdr);
1488
1489	while (!sr.done && dp < desc_end)
1490		dp = unw_decode(dp, sr.in_body, &sr);
1491
1492	if (sr.when_target > sr.epilogue_start) {
1493		/*
1494		 * sp has been restored and all values on the memory stack below
1495		 * psp also have been restored.
1496		 */
1497		sr.curr.reg[UNW_REG_PSP].val = 0;
1498		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1499		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1500		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1501			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1502			    || r->where == UNW_WHERE_SPREL)
1503			{
1504				r->val = 0;
1505				r->where = UNW_WHERE_NONE;
1506				r->when = UNW_WHEN_NEVER;
1507			}
1508	}
1509
1510	script->flags = sr.flags;
1511
1512	/*
	 * If RP didn't get saved, generate an entry for the return link
1514	 * register.
1515	 */
1516	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1517		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1518		sr.curr.reg[UNW_REG_RP].when = -1;
1519		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1520	}
1521
1522#if UNW_DEBUG
1523	printk("unwind: state record for func 0x%lx, t=%u:\n",
1524	       table->segment_base + e->start_offset, sr.when_target);
1525	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1526		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1527			printk("  %s <- ", unw.preg_name[r - sr.curr.reg]);
1528			switch (r->where) {
1529			      case UNW_WHERE_GR:     printk("r%lu", r->val); break;
1530			      case UNW_WHERE_FR:     printk("f%lu", r->val); break;
1531			      case UNW_WHERE_BR:     printk("b%lu", r->val); break;
1532			      case UNW_WHERE_SPREL:  printk("[sp+0x%lx]", r->val); break;
1533			      case UNW_WHERE_PSPREL: printk("[psp+0x%lx]", r->val); break;
1534			      case UNW_WHERE_NONE:
1535				printk("%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1536				break;
1537			      default:		     printk("BADWHERE(%d)", r->where); break;
1538			}
1539			printk("\t\t%d\n", r->when);
1540		}
1541	}
1542#endif
1543
1544	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1545
1546	/* translate state record into unwinder instructions: */
1547
1548	/*
1549	 * First, set psp if we're dealing with a fixed-size frame;
1550	 * subsequent instructions may depend on this value.
1551	 */
1552	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1553	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1554	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
1555		/* new psp is sp plus frame size */
1556		insn.opc = UNW_INSN_ADD;
1557		insn.dst = struct_offset(struct unw_frame_info, psp)/8;
1558		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
1559		script_emit(script, insn);
1560	}
1561
1562	/* determine where the primary UNaT is: */
1563	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1564		i = UNW_REG_PRI_UNAT_MEM;
1565	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1566		i = UNW_REG_PRI_UNAT_GR;
1567	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1568		i = UNW_REG_PRI_UNAT_MEM;
1569	else
1570		i = UNW_REG_PRI_UNAT_GR;
1571
1572	compile_reg(&sr, i, script);
1573
1574	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1575		compile_reg(&sr, i, script);
1576
1577	/* free labeled register states & stack: */
1578
1579	STAT(parse_start = ia64_get_itc());
1580	for (ls = sr.labeled_states; ls; ls = next) {
1581		next = ls->next;
1582		free_state_stack(&ls->saved_state);
1583		free_labeled_state(ls);
1584	}
1585	free_state_stack(&sr.curr);
1586	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1587
1588	script_finalize(script, &sr);
1589	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1590	return script;
1591}
1592
/*
 * Apply the unwinding actions represented by SCRIPT and update STATE to
 * reflect the state that existed upon entry to the function that this
 * script represents.
 */
1598static inline void
1599run_script (struct unw_script *script, struct unw_frame_info *state)
1600{
1601	struct unw_insn *ip, *limit, next_insn;
1602	unsigned long opc, dst, val, off;
1603	unsigned long *s = (unsigned long *) state;
1604	STAT(unsigned long start;)
1605
1606	STAT(++unw.stat.script.runs; start = ia64_get_itc());
1607	state->flags = script->flags;
1608	ip = script->insn;
1609	limit = script->insn + script->count;
1610	next_insn = *ip;
1611
1612	while (ip++ < limit) {
1613		opc = next_insn.opc;
1614		dst = next_insn.dst;
1615		val = next_insn.val;
1616		next_insn = *ip;
1617
1618	  redo:
1619		switch (opc) {
1620		      case UNW_INSN_ADD:
1621			s[dst] += val;
1622			break;
1623
1624		      case UNW_INSN_MOVE2:
1625			if (!s[val])
1626				goto lazy_init;
1627			s[dst+1] = s[val+1];
1628			s[dst] = s[val];
1629			break;
1630
1631		      case UNW_INSN_MOVE:
1632			if (!s[val])
1633				goto lazy_init;
1634			s[dst] = s[val];
1635			break;
1636
1637		      case UNW_INSN_MOVE_STACKED:
1638			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1639								    val);
1640			break;
1641
1642		      case UNW_INSN_ADD_PSP:
1643			s[dst] = state->psp + val;
1644			break;
1645
1646		      case UNW_INSN_ADD_SP:
1647			s[dst] = state->sp + val;
1648			break;
1649
1650		      case UNW_INSN_SETNAT_MEMSTK:
1651			if (!state->pri_unat_loc)
1652				state->pri_unat_loc = &state->sw->ar_unat;
1653			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1655			break;
1656
1657		      case UNW_INSN_SETNAT_TYPE:
1658			s[dst+1] = val;
1659			break;
1660
1661		      case UNW_INSN_LOAD:
1662#if UNW_DEBUG
1663			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1664			    || s[val] < TASK_SIZE)
1665			{
1666				debug(1, "unwind: rejecting bad psp=0x%lx\n", s[val]);
1667				break;
1668			}
1669#endif
1670			s[dst] = *(unsigned long *) s[val];
1671			break;
1672		}
1673	}
1674	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1675	return;
1676
1677  lazy_init:
1678	off = unw.sw_off[val];
1679	s[val] = (unsigned long) state->sw + off;
1680	if (off >= struct_offset(struct switch_stack, r4)
1681	    && off <= struct_offset(struct switch_stack, r7))
1682		/*
1683		 * We're initializing a general register: init NaT info, too.  Note that
1684		 * the offset is a multiple of 8 which gives us the 3 bits needed for
1685		 * the type field.
1686		 */
1687		s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1688	goto redo;
1689}
1690
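/*
 * Find (or build) the unwind script for INFO->ip and run it to fill in the
 * save locations of the current frame.  Returns 0 on success, -1 if
 * INFO->ip is invalid or no script could be built.
 */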
1691static int
1692find_save_locs (struct unw_frame_info *info)
1693{
1694	int have_write_lock = 0;
1695	struct unw_script *scr;
1696
1697	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1698		/* don't let obviously bad addresses pollute the cache */
1699		debug(1, "unwind: rejecting bad ip=0x%lx\n", info->ip);
1700		info->rp_loc = 0;
1701		return -1;
1702	}
1703
1704	scr = script_lookup(info);
1705	if (!scr) {
1706		scr = build_script(info);
1707		if (!scr) {
1708			dprintk("unwind: failed to locate/build unwind script for ip %lx\n",
1709				info->ip);
1710			return -1;
1711		}
1712		have_write_lock = 1;
1713	}
1714	info->hint = scr->hint;
1715	info->prev_script = scr - unw.cache;
1716
1717	run_script(scr, info);
1718
1719	if (have_write_lock)
1720		write_unlock(&scr->lock);
1721	else
1722		read_unlock(&scr->lock);
1723	return 0;
1724}
1725
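/*
 * Unwind INFO by one frame: restore ip from the saved return link and cfm
 * from the saved ar.pfs, step bsp and sp back to the caller's frame, and
 * recompute the save locations for that frame.  Returns 0 on success, -1 if
 * the frame cannot be unwound any further.
 */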
1726int
1727unw_unwind (struct unw_frame_info *info)
1728{
1729	unsigned long prev_ip, prev_sp, prev_bsp;
1730	unsigned long ip, pr, num_regs;
1731	STAT(unsigned long start, flags;)
1732	int retval;
1733
1734	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1735
1736	prev_ip = info->ip;
1737	prev_sp = info->sp;
1738	prev_bsp = info->bsp;
1739
1740	/* restore the ip */
1741	if (!info->rp_loc) {
1742		debug(1, "unwind: failed to locate return link (ip=0x%lx)!\n", info->ip);
1743		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1744		return -1;
1745	}
1746	ip = info->ip = *info->rp_loc;
1747	if (ip < GATE_ADDR + PAGE_SIZE) {
1748		/*
1749		 * We don't have unwind info for the gate page, so we consider that part
1750		 * of user-space for the purpose of unwinding.
1751		 */
1752		debug(1, "unwind: reached user-space (ip=0x%lx)\n", ip);
1753		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1754		return -1;
1755	}
1756
1757	/* restore the cfm: */
1758	if (!info->pfs_loc) {
1759		dprintk("unwind: failed to locate ar.pfs!\n");
1760		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1761		return -1;
1762	}
1763	info->cfm_loc = info->pfs_loc;
1764
1765	/* restore the bsp: */
1766	pr = info->pr;
1767	num_regs = 0;
1768	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1769		if ((pr & (1UL << pNonSys)) != 0)
1770			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
1771		info->pfs_loc =
1772			(unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs));
1773	} else
1774		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
1775	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1776	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1777		dprintk("unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1778			info->bsp, info->regstk.limit, info->regstk.top);
1779		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1780		return -1;
1781	}
1782
1783	/* restore the sp: */
1784	info->sp = info->psp;
1785	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1786		dprintk("unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1787			info->sp, info->memstk.top, info->memstk.limit);
1788		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1789		return -1;
1790	}
1791
1792	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1793		dprintk("unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip);
1794		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1795		return -1;
1796	}
1797
1798	/* as we unwind, the saved ar.unat becomes the primary unat: */
1799	info->pri_unat_loc = info->unat_loc;
1800
1801	/* finally, restore the predicates: */
1802	unw_get_pr(info, &info->pr);
1803
1804	retval = find_save_locs(info);
1805	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1806	return retval;
1807}
1808
1809int
1810unw_unwind_to_user (struct unw_frame_info *info)
1811{
1812	unsigned long ip;
1813
1814	while (unw_unwind(info) >= 0) {
1815		if (unw_get_rp(info, &ip) < 0) {
1816			unw_get_ip(info, &ip);
1817			dprintk("unwind: failed to read return pointer (ip=0x%lx)\n", ip);
1818			return -1;
1819		}
1820		/*
1821		 * We don't have unwind info for the gate page, so we consider that part
1822		 * of user-space for the purpose of unwinding.
1823		 */
1824		if (ip < GATE_ADDR + PAGE_SIZE)
1825			return 0;
1826	}
1827	unw_get_ip(info, &ip);
1828	dprintk("unwind: failed to unwind to user-level (ip=0x%lx)\n", ip);
1829	return -1;
1830}
1831
1832void
1833unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
1834{
1835	unsigned long rbslimit, rbstop, stklimit, stktop, sol;
1836	STAT(unsigned long start, flags;)
1837
1838	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1839
1840	/*
1841	 * Subtle stuff here: we _could_ unwind through the
1842	 * switch_stack frame but we don't want to do that because it
1843	 * would be slow as each preserved register would have to be
1844	 * processed.  Instead, what we do here is zero out the frame
1845	 * info and start the unwind process at the function that
1846	 * created the switch_stack frame.  When a preserved value in
1847	 * switch_stack needs to be accessed, run_script() will
1848	 * initialize the appropriate pointer on demand.
1849	 */
1850	memset(info, 0, sizeof(*info));
1851
1852	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1853	rbstop   = sw->ar_bspstore;
1854	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
1855		rbstop = rbslimit;
1856
1857	stklimit = (unsigned long) t + IA64_STK_OFFSET;
1858	stktop   = (unsigned long) sw - 16;
1859	if (stktop <= rbstop)
1860		stktop = rbstop;
1861
1862	info->regstk.limit = rbslimit;
1863	info->regstk.top   = rbstop;
1864	info->memstk.limit = stklimit;
1865	info->memstk.top   = stktop;
1866	info->task = t;
1867	info->sw  = sw;
1868	info->sp = info->psp = (unsigned long) (sw + 1) - 16;
1869	info->cfm_loc = &sw->ar_pfs;
1870	sol = (*info->cfm_loc >> 7) & 0x7f;
1871	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
1872	info->ip = sw->b0;
1873	info->pr = sw->pr;
1874
1875	find_save_locs(info);
1876	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
1877}

void
unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
{
	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);

	unw_init_frame_info(info, t, sw);
}

static void
init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
		   unsigned long gp, const void *table_start, const void *table_end)
{
	const struct unw_table_entry *start = table_start, *end = table_end;

	table->name = name;
	table->segment_base = segment_base;
	table->gp = gp;
	table->start = segment_base + start[0].start_offset;
	table->end = segment_base + end[-1].end_offset;
	table->array = start;
	table->length = end - start;
}

void *
unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
		      const void *table_start, const void *table_end)
{
	const struct unw_table_entry *start = table_start, *end = table_end;
	struct unw_table *table;
	unsigned long flags;

	if (end - start <= 0) {
		dprintk("unwind: ignoring attempt to insert empty unwind table\n");
		return 0;
	}

	table = kmalloc(sizeof(*table), GFP_USER);
	if (!table)
		return 0;

	init_unwind_table(table, name, segment_base, gp, table_start, table_end);

	spin_lock_irqsave(&unw.lock, flags);
	{
		/* keep kernel unwind table at the front (it's searched most commonly): */
		table->next = unw.tables->next;
		unw.tables->next = table;
	}
	spin_unlock_irqrestore(&unw.lock, flags);

	return table;
}

void
unw_remove_unwind_table (void *handle)
{
	struct unw_table *table, *prev;
	struct unw_script *tmp;
	unsigned long flags;
	long index;

	if (!handle) {
		dprintk("unwind: ignoring attempt to remove non-existent unwind table\n");
		return;
	}

	table = handle;
	if (table == &unw.kernel_table) {
		dprintk("unwind: refusing to remove the kernel's unwind table\n");
		return;
	}

	spin_lock_irqsave(&unw.lock, flags);
	{
		/* first, delete the table: */

		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
			if (prev->next == table)
				break;
		if (!prev) {
			dprintk("unwind: failed to find unwind table %p\n", (void *) table);
			spin_unlock_irqrestore(&unw.lock, flags);
			return;
		}
		prev->next = table->next;
	}
	spin_unlock_irqrestore(&unw.lock, flags);

	/* next, remove hash table entries for this table */

	for (index = 0; index < UNW_HASH_SIZE; ++index) {
		tmp = unw.cache + unw.hash[index];
		if (unw.hash[index] >= UNW_CACHE_SIZE
		    || tmp->ip < table->start || tmp->ip >= table->end)
			continue;

		write_lock(&tmp->lock);
		{
			if (tmp->ip >= table->start && tmp->ip < table->end) {
				unw.hash[index] = tmp->coll_chain;
				tmp->ip = 0;
			}
		}
		write_unlock(&tmp->lock);
	}

	kfree(table);
}
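
/*
 * For illustration only: a minimal sketch of how a client (e.g. the module
 * loader) would pair the two entry points above.  The function and variable
 * names are hypothetical; the unw_* calls are the real interface.
 */
#if 0
static void *example_unw_handle;

static void
example_register_unwind_info (const char *name, unsigned long load_base, unsigned long gp,
			      const void *unw_start, const void *unw_end)
{
	/* unw_add_unwind_table() returns an opaque handle, or NULL on failure */
	example_unw_handle = unw_add_unwind_table(name, load_base, gp, unw_start, unw_end);
}

static void
example_unregister_unwind_info (void)
{
	if (example_unw_handle)
		unw_remove_unwind_table(example_unw_handle);
}
#endif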

void
unw_create_gate_table (void)
{
	extern char __start_gate_section[], __stop_gate_section[];
	unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
	const struct unw_table_entry *entry, *first;
	size_t info_size, size;
	char *info;

	start = (unsigned long) __start_gate_section - segbase;
	end   = (unsigned long) __stop_gate_section - segbase;
	size  = 0;
	first = lookup(&unw.kernel_table, start);

	for (entry = first; entry->start_offset < end; ++entry)
		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
	size += 8;	/* reserve space for "end of table" marker */

	unw.gate_table = alloc_bootmem(size);
	if (!unw.gate_table) {
		unw.gate_table_size = 0;
		printk("unwind: unable to create unwind data for gate page!\n");
		return;
	}
	unw.gate_table_size = size;

	lp = unw.gate_table;
	info = (char *) unw.gate_table + size;

	for (entry = first; entry->start_offset < end; ++entry, lp += 3) {
		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
		info -= info_size;
		memcpy(info, (char *) segbase + entry->info_offset, info_size);

		lp[0] = entry->start_offset - start + GATE_ADDR;	/* start */
		lp[1] = entry->end_offset - start + GATE_ADDR;		/* end */
		lp[2] = info - (char *) unw.gate_table;			/* info */
	}
	*lp = 0;	/* end-of-table marker */
}
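
/*
 * For reference only: each entry emitted by the loop above occupies three
 * unsigned longs and could be viewed through a (hypothetical) struct like the
 * one below; the unwind info blobs themselves are packed at the end of the
 * same allocation and are addressed via table-relative offsets.
 */
#if 0
struct example_gate_unwind_entry {
	unsigned long start;	/* absolute start address within the gate page */
	unsigned long end;	/* absolute end address of the function */
	unsigned long info;	/* offset of the unwind info, relative to the table base */
};
#endif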

void
unw_init (void)
{
	extern int ia64_unw_start, ia64_unw_end, __gp;
	extern void unw_hash_index_t_is_too_narrow (void);
	long i, off;

	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
		unw_hash_index_t_is_too_narrow();

	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
		unw.sw_off[unw.preg_index[i]] = off;

	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
		if (i > 0)
			unw.cache[i].lru_chain = (i - 1);
		unw.cache[i].coll_chain = -1;
		unw.cache[i].lock = RW_LOCK_UNLOCKED;
	}
	unw.lru_head = UNW_CACHE_SIZE - 1;
	unw.lru_tail = 0;

	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
			  &ia64_unw_start, &ia64_unw_end);
}

/*
 * This system call copies the unwind data into the buffer pointed to by BUF and returns
 * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
 * unwind data.
 *
 * The first portion of the unwind data contains an unwind table and the rest contains the
 * associated unwind info (in no particular order).  The unwind table consists of a table
 * of entries of the form:
 *
 *	u64 start;	(64-bit address of start of function)
 *	u64 end;	(64-bit address of end of function)
 *	u64 info;	(BUF-relative offset to unwind info)
 *
 * The end of the unwind table is indicated by an entry with a START address of zero.
 *
 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
 * on the format of the unwind info.
 *
 * ERRORS
 *	EFAULT	BUF points outside your accessible address space.
 */
asmlinkage long
sys_getunwind (void *buf, size_t buf_size)
{
	if (buf && buf_size >= unw.gate_table_size)
		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
			return -EFAULT;
	return unw.gate_table_size;
}
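
/*
 * For illustration only: a minimal user-level sketch (not kernel code) of
 * fetching and walking the table described above.  It assumes the call is
 * reachable as syscall(__NR_getunwind, ...) with __NR_getunwind taken from
 * <asm/unistd.h>.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

int
main (void)
{
	unsigned long *entry, size;
	char *buf;

	size = syscall(__NR_getunwind, NULL, 0);	/* query the size only */
	buf = malloc(size);
	if (!buf || syscall(__NR_getunwind, buf, size) != size)
		return 1;

	/* entries are {start, end, info}; a start address of zero ends the table */
	for (entry = (unsigned long *) buf; entry[0] != 0; entry += 3)
		printf("start=0x%lx end=0x%lx info at buf+0x%lx\n",
		       entry[0], entry[1], entry[2]);
	free(buf);
	return 0;
}
#endif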