/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>

/*
** Debug options
**	DEBUG_PAT	Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;

void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		printk("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_PAT;
		printk("64 bit PAT.\n");
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test. */

	status = pdc_model_info(&model);

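	/* The bus ID is the 5-bit field above the low eleven bits of the
	 * hversion word; the switch below maps it to a machine family. */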
	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		printk("Snake.\n");
		return;

	default:		/* Everything else */

		printk("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

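	/* Convert the byte address to a PFN, and the count of 4 kB PDC
	 * pages to a count of PAGE_SIZE-sized kernel pages. */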
	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 Gb, which report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs.  This view will cause an invalid
** argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

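	/* The cell module block is large (it ends in a big mod[] array of
	 * range descriptors), so allocate it from the heap rather than
	 * placing it on the kernel stack. */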
	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, i/o, etc) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if ((mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || ((mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI))) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

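	/* Walk every module in this cell: pat_query_module() registers each
	 * one and returns a non-OK status once there are no more modules. */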
	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				   (unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

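	/* Copy only the entries the firmware actually returned;
	 * entries_total is used above purely to warn about truncation. */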
	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else	/* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		     struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

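		/* This module is a bus adapter; probe each function number
		 * beneath it for sub-devices. */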
		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif	/* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

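	/* Address indices for PDC_FIND_ADDRESS are 1-based here; record each
	 * address that the firmware returns successfully. */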
	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices the
 * firmware knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

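	/* Probe module indices until the firmware reports that no more
	 * modules exist; 256 is just an upper bound on the scan. */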
	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
						  &module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

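	/* Sanity check: we expect at least one range, and the first range is
	 * expected to start at PFN 0; otherwise fall back to the PAGE0 view. */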
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}