/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>

/*
** Debug options
** DEBUG_PAT	Dump details that PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

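/* Which flavour of PDC firmware this box provides; set once by setup_pdc(). */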
int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;

void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		printk("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_PAT;
		printk("64 bit PAT.\n");
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

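	/* The 5-bit bus ID sits in bits [15:11] of the hversion word. */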
	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		printk("Snake.\n");
		return;

	default:		/* Everything else */

		printk("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
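/*
 * Example: with a 16k kernel PAGE_SIZE, PAGE_SHIFT is 14, so
 * PDC_PAGE_ADJ_SHIFT is 2 and a PDC count of 4k pages is shifted
 * right by 2 (divided by 4) to get kernel pages.
 */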

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 GB, which report that memory using 64-bit-only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges,0UL,npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
**  If a module is found, register module will get the IODC bytes via
**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
**  only for SBAs and LBAs.  This view will cause an invalid
**  argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
#ifdef DEBUG_PAT
	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
#endif
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, &pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		return status;
	}

	temp = pa_pdc_cell.cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
	if (!dev) {
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell.mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell.mod_location;

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell.mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
			pa_pdc_cell.mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
		for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell.mod[2 + i * 3],	/* type */
				pa_pdc_cell.mod[3 + i * 3],	/* start */
				pa_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */
	return PDC_OK;
}


/* PAT PDC can return information about a variety of different
 * types of memory (e.g. firmware, I/O, etc.) but we only care about
 * the usable physical RAM right now. Since the firmware-specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
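/*
 * pat_memconfig() below keeps PAT_MAX_RANGES + 1 entries of
 * struct pdc_pat_pd_addr_map_entry on its stack, so this multiplier
 * directly determines that stack footprint.
 */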

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, fall back to the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

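	/* Walk this cell's modules until PDC reports no more (or an error). */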
	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B, C, J class). Other non-PAT PDC machines
		 * do support more than 3.75 GB of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

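		/*
		 * dev is a bus adapter: fix bc[4] to this module and
		 * probe all of its possible sub-device functions.
		 */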
		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

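	/* Fetch each additional address; the index argument is 1-based. */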
	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices the
 * firmware knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

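	/* Try up to 256 module indices; stop as soon as the firmware
	 * reports there are no more modules.
	 */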
	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

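	/* Sanity check: we need at least one range and it must start at
	 * pfn 0, otherwise fall back to the PAGE0 information.
	 */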
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}
