/* $Id: ate_utils.c,v 1.1.1.1 2008/10/15 03:26:03 james26_jang Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/io.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h>
#include <asm/sn/ioerror_handling.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>

#include <asm/sn/ate_utils.h>

/*
 * Allocate the map needed to allocate the ATE entries.
 */
struct map *
atemapalloc(ulong_t mapsiz)
{
	struct map *mp;
	ulong_t size;
	struct a {
		spinlock_t lock;
		sv_t	sema;
	} *sync;

	if (mapsiz == 0)
		return(NULL);
	size = sizeof(struct map) * (mapsiz + 2);
	if ((mp = (struct map *) kmalloc(size, GFP_KERNEL)) == NULL)
		return(NULL);
	memset(mp, 0x0, size);

	sync = kmalloc(sizeof(struct a), GFP_KERNEL);
	if (sync == NULL) {
		kfree(mp);
		return(NULL);
	}
	memset(sync, 0x0, sizeof(struct a));

	mutex_spinlock_init(&sync->lock);
	sv_init(&(sync->sema), &(sync->lock), SV_MON_SPIN | SV_ORDER_FIFO /*| SV_INTS*/);

	/* stash the lock/semaphore pair in the reserved map slot;
	 * maplock()/mapout() retrieve them from here */
	mp[1].m_size = (unsigned long) &sync->lock;
	mp[1].m_addr = (unsigned long) &sync->sema;
	mapsize(mp) = mapsiz - 1;
	return(mp);
}

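/*
 * Illustrative lifecycle sketch (not taken from any caller in this
 * file; the map size of 1024 is a hypothetical value):
 *
 *	struct map *atemap;
 *	ulong_t base;
 *
 *	atemap = atemapalloc(1024);
 *	if (atemap == NULL)
 *		return;				(out of memory)
 *	base = atealloc(atemap, 8);		(reserve 8 ATE entries)
 *	if (base != 0)
 *		atefree(atemap, 8, base);	(release them again)
 *	atemapfree(atemap);
 */
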
/*
 * Free a map structure previously allocated via atemapalloc().
 */
void
atemapfree(struct map *mp)
{
	/* ASSERT(sv_waitq(mapout(mp)) == 0); */
	/* sv_destroy(mapout(mp)); */
	spin_lock_destroy(maplock(mp));
	/* mp[1].m_size holds the sync area allocated in atemapalloc() */
	kfree((void *)mp[1].m_size);
	kfree(mp);
}

/*
 * Allocate 'size' units from the given map.
 * Return the base of the allocated space, or 0 if no free extent
 * is large enough.
 * In a map, the addresses are increasing and the
 * list is terminated by a 0 size.
 * Algorithm is first-fit.
 */

ulong_t
atealloc(
	struct map *mp,
	size_t size)
{
	register unsigned long a;
	register struct map *bp;
	register unsigned long s;

	ASSERT(size >= 0);

	if (size == 0)
		return((ulong_t) NULL);

	s = mutex_spinlock(maplock(mp));

	for (bp = mapstart(mp); bp->m_size; bp++) {
		if (bp->m_size >= size) {
			a = bp->m_addr;
			bp->m_addr += size;
			if ((bp->m_size -= size) == 0) {
				/* extent fully consumed: close the gap by
				 * shifting the rest of the list down one slot */
				do {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
				} while ((((bp-1)->m_size) = (bp->m_size)));
				mapsize(mp)++;
			}

			ASSERT(bp->m_size < 0x80000000);
			mutex_spinunlock(maplock(mp), s);
			return(a);
		}
	}

	/*
	 * We did not get what we need, and we cannot sleep
	 * waiting for space to be freed.
	 */
	mutex_spinunlock(maplock(mp), s);
	return(0);
}

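/*
 * Worked example of the first-fit scan above, on a hypothetical map
 * state (addresses and sizes invented for illustration):
 *
 *	free list:	{ m_addr 0x10, m_size 4 }
 *			{ m_addr 0x40, m_size 16 }
 *			{ m_size 0 }		<- terminator
 *
 * atealloc(mp, 8) skips the 4-entry fragment, carves 8 entries off
 * the front of the 16-entry one and returns 0x40, leaving:
 *
 *	free list:	{ m_addr 0x10, m_size 4 }
 *			{ m_addr 0x48, m_size 8 }
 *			{ m_size 0 }
 */
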
/*
 * Free the previously allocated space a of size units into the specified map.
 * Sort ``a'' into map and combine on one or both ends if possible.
 */
void
atefree(struct map *mp, size_t size, ulong_t a)
{
	register struct map *bp;
	register unsigned long t;
	register unsigned long s;

	ASSERT(size >= 0);

	if (size == 0)
		return;

	bp = mapstart(mp);
	s = mutex_spinlock(maplock(mp));

	/* find the first free extent at or above 'a' */
	for ( ; bp->m_addr<=a && bp->m_size!=0; bp++)
		;
	if (bp>mapstart(mp) && (bp-1)->m_addr+(bp-1)->m_size == a) {
		/* coalesce with the extent below */
		(bp-1)->m_size += size;
		if (bp->m_addr) {
			/* m_addr==0 is the end of the map table */
			ASSERT(a+size <= bp->m_addr);
			if (a+size == bp->m_addr) {

				/* compress adjacent map addr entries */
				(bp-1)->m_size += bp->m_size;
				while (bp->m_size) {
					bp++;
					(bp-1)->m_addr = bp->m_addr;
					(bp-1)->m_size = bp->m_size;
				}
				mapsize(mp)++;
			}
		}
	} else {
		if (a+size == bp->m_addr && bp->m_size) {
			/* coalesce with the extent above */
			bp->m_addr -= size;
			bp->m_size += size;
		} else {
			ASSERT(size);
			if (mapsize(mp) == 0) {
				mutex_spinunlock(maplock(mp), s);
				printk("atefree : map overflow 0x%p Lost 0x%lx items at 0x%lx\n",
						(void *)mp, size, a);
				return;
			}
			/* insert a new extent, shifting the rest of
			 * the list up one slot */
			do {
				t = bp->m_addr;
				bp->m_addr = a;
				a = t;
				t = bp->m_size;
				bp->m_size = size;
				bp++;
			} while ((size = t));
			mapsize(mp)--;
		}
	}
	mutex_spinunlock(maplock(mp), s);
	/*
	 * wake up everyone waiting for space
	 */
	/*
	if (mapout(mp))
		sv_broadcast(mapout(mp));
	*/
}

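/*
 * Worked example of the coalescing above, continuing the hypothetical
 * map state from the atealloc() example:
 *
 *	free list:	{ m_addr 0x10, m_size 4 }
 *			{ m_addr 0x48, m_size 8 }
 *			{ m_size 0 }
 *
 * atefree(mp, 0x34, 0x14) frees a run that touches both neighbours:
 * it is merged below (0x10 + 4 == 0x14), merged above
 * (0x14 + 0x34 == 0x48), and the list compresses to:
 *
 *	free list:	{ m_addr 0x10, m_size 0x40 }
 *			{ m_size 0 }
 */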