/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M       - The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is
 *		  physically contiguous (although various IO spaces may punch
 *		  holes in it).
 *
 *	N       - Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID   - network ID of a router, Mbrick or Cbrick. Nasid values of
 *		  routers always have the low bit set to 1; C/MBricks have the
 *		  low bit equal to 0. Most addressing macros that target UV hub
 *		  chips right shift the NASID by 1 to exclude the always-zero
 *		  bit. NASIDs contain up to 15 bits.
 *
 *	GNODE   - NASID right shifted by 1 bit. Most mmrs contain gnodes
 *		  instead of nasids.
 *
 *	PNODE   - the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *	GPA     - (global physical address) a socket physical address
 *		  converted so that it can be used by the GRU as a global
 *		  address. Socket physical addresses 1) need additional NASID
 *		  (node) bits added to the high end of the address, and
 *		  2) need to be unaliased if the partition does not have a
 *		  physical address 0. In addition, on UV2 rev 1, GPAs need
 *		  the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|         GNODE          |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<-------53 - M bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   <--- N bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *	Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *	The actual values are configuration dependent and are set at
 *	boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *		1111110000000000
 *		5432109876543210
 *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *		sssssssssss
 *
 *			p = pnode bits
 *			l = socket number on board
 *			c = core
 *			h = hyperthread
 *			s = bits that are in the SOCKET_ID CSR
 *
 *	Note: Processor may support fewer bits in the APICID register. The
 *	      ACPI tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
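
/*
 * Illustrative sketch of the terminology above (hypothetical values, not a
 * description of any particular system): with M = 37 offset bits and a hub
 * whose NASID is 6 (low bit 0, so a C/MBrick):
 *
 *	GNODE = NASID >> 1             = 3
 *	PNODE = GNODE & ((1 << N) - 1) = 3	(assuming N >= 2)
 *	GPA   = (GNODE << M) | NodeOffset
 *
 * The macros and inline functions later in this file perform these
 * conversions using the M/N/shift values discovered at boot.
 */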

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
	u32	limit;		/* PA bits 56:26 (GAM_RANGE_SHFT) */
	u16	nasid;		/* node's global physical address */
	s8	base;		/* entry index of node's base addr */
	u8	reserved;
};

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
	unsigned int		hub_type;
	unsigned char		hub_revision;
	unsigned long		global_mmr_base;
	unsigned long		global_mmr_shift;
	unsigned long		gpa_mask;
	unsigned short		*socket_to_node;
	unsigned short		*socket_to_pnode;
	unsigned short		*pnode_to_socket;
	struct uv_gam_range_s	*gr_table;
	unsigned short		min_socket;
	unsigned short		min_pnode;
	unsigned char		m_val;
	unsigned char		n_val;
	unsigned char		gr_table_len;
	unsigned char		apic_pnode_shift;
	unsigned char		gpa_shift;
	unsigned char		nasid_shift;
	unsigned char		m_shift;
	unsigned char		n_lshift;
	unsigned int		gnode_extra;
	unsigned long		gnode_upper;
	unsigned long		lowmem_remap_top;
	unsigned long		lowmem_remap_base;
	unsigned long		global_gru_base;
	unsigned long		global_gru_shift;
	unsigned short		pnode;
	unsigned short		pnode_mask;
	unsigned short		coherency_domain_number;
	unsigned short		numa_blade_id;
	unsigned short		nr_possible_cpus;
	unsigned short		nr_online_cpus;
	short			memory_nid;
};

/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
	void			*p_uv_hub_info;
	unsigned char		blade_cpu_id;
	void			*reserved;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define	uv_hub_info	_uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}

static inline int uv_hub_type(void)
{
	return uv_hub_info->hub_type;
}

static inline __init void uv_hub_type_set(int uvmask)
{
	uv_hub_info->hub_type = uvmask;
}


/*
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV2_HUB_REVISION_BASE	3
#define UV3_HUB_REVISION_BASE	5
#define UV4_HUB_REVISION_BASE	7
#define UV4A_HUB_REVISION_BASE	8	/* UV4 (fixed) rev 2 */
#define UV5_HUB_REVISION_BASE	9

static inline int is_uv(int uvmask) { return uv_hub_type() & uvmask; }
static inline int is_uv1_hub(void) { return 0; }
static inline int is_uv2_hub(void) { return is_uv(UV2); }
static inline int is_uv3_hub(void) { return is_uv(UV3); }
static inline int is_uv4a_hub(void) { return is_uv(UV4A); }
static inline int is_uv4_hub(void) { return is_uv(UV4); }
static inline int is_uv5_hub(void) { return is_uv(UV5); }

/*
 * UV4A is a revision of UV4. So on UV4A both is_uv4_hub() and
 * is_uv4a_hub() return true, while on UV4 only is_uv4_hub()
 * returns true. To distinguish the two, test is_uv4a_hub() first,
 * then is_uv4_hub().
 */
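
/*
 * Illustrative sketch of the test order described above (the called setup
 * functions are hypothetical, purely for illustration):
 *
 *	if (is_uv4a_hub())
 *		uv4a_specific_setup();		(hypothetical)
 *	else if (is_uv4_hub())
 *		uv4_setup();			(hypothetical)
 *
 * On UV4A hardware the hub type mask has both the UV4 and UV4A bits set,
 * so testing is_uv4_hub() first would also match UV4A systems.
 */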

/* UVX class: UV2,3,4 */
static inline int is_uvx_hub(void) { return is_uv(UVX); }

/* UVY class: UV5,..? */
static inline int is_uvy_hub(void) { return is_uv(UVY); }

/* Any UV Hubbed System */
static inline int is_uv_hub(void) { return is_uv(UV_ANY); }

union uvh_apicid {
	unsigned long	v;
	struct uvh_apicid_s {
		unsigned long	local_apic_mask  : 24;
		unsigned long	local_apic_shift :  5;
		unsigned long	unused1          :  3;
		unsigned long	pnode_mask       : 24;
		unsigned long	pnode_shift      :  5;
		unsigned long	unused2          :  3;
	} s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)						\
		(((n) >> uv_hub_info->nasid_shift) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)	((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)						\
		(UV_PNODE_TO_GNODE(p) << uv_hub_info->nasid_shift)
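
/*
 * Illustrative use of the macros above (a sketch; the real values come from
 * the boot-time hub info, so the numbers here are hypothetical): with
 * nasid_shift = 1, pnode_mask = 0x3ff and gnode_extra = 0, a NASID of 6
 * gives
 *
 *	UV_NASID_TO_PNODE(6) == (6 >> 1) & 0x3ff == 3
 *	UV_PNODE_TO_NASID(3) == (3 | 0) << 1     == 6
 *
 * i.e. with gnode_extra == 0 the two macros are inverses for pnodes within
 * the partition.
 */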

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV4_LOCAL_MMR_BASE		0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE		0
#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE		0

#define UV5_LOCAL_MMR_BASE		0xfa000000UL
#define UV5_GLOBAL_MMR32_BASE		0
#define UV5_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV5_GLOBAL_MMR32_SIZE		0

#define UV_LOCAL_MMR_BASE		(			\
				is_uv(UV2) ? UV2_LOCAL_MMR_BASE :	\
				is_uv(UV3) ? UV3_LOCAL_MMR_BASE :	\
				is_uv(UV4) ? UV4_LOCAL_MMR_BASE :	\
				is_uv(UV5) ? UV5_LOCAL_MMR_BASE :	\
				0)

#define UV_GLOBAL_MMR32_BASE		(			\
				is_uv(UV2) ? UV2_GLOBAL_MMR32_BASE :	\
				is_uv(UV3) ? UV3_GLOBAL_MMR32_BASE :	\
				is_uv(UV4) ? UV4_GLOBAL_MMR32_BASE :	\
				is_uv(UV5) ? UV5_GLOBAL_MMR32_BASE :	\
				0)

#define UV_LOCAL_MMR_SIZE		(			\
				is_uv(UV2) ? UV2_LOCAL_MMR_SIZE :	\
				is_uv(UV3) ? UV3_LOCAL_MMR_SIZE :	\
				is_uv(UV4) ? UV4_LOCAL_MMR_SIZE :	\
				is_uv(UV5) ? UV5_LOCAL_MMR_SIZE :	\
				0)

#define UV_GLOBAL_MMR32_SIZE		(			\
				is_uv(UV2) ? UV2_GLOBAL_MMR32_SIZE :	\
				is_uv(UV3) ? UV3_GLOBAL_MMR32_SIZE :	\
				is_uv(UV4) ? UV4_GLOBAL_MMR32_SIZE :	\
				is_uv(UV5) ? UV5_GLOBAL_MMR32_SIZE :	\
				0)

#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system. This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node. With Nehalem cpus (2 cores per node,
 * 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
 * a node.
 *
 * The window is located at the top of ACPI MMR space.
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				 LOCAL_BUS_SIZE - \
				 SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local physical
 * addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
	return uv_hub_info->gpa_shift;
}
#define	_uv_gpa_shift

/* Find node that has the address range that contains global address */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
	int i, num = uv_hub_info->gr_table_len;

	if (gr) {
		for (i = 0; i < num; i++, gr++) {
			if (pal < gr->limit)
				return gr;
		}
	}
	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
	BUG();
}

/* Return base address of node that contains global address */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_gam_range(pa);
	int base = gr->base;

	if (base < 0)
		return 0UL;

	return uv_hub_info->gr_table[base].limit;
}

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
	return uv_gam_range(paddr)->nasid;
}
#define	_uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
	return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	unsigned int m_val = uv_hub_info->m_val;

	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;

	if (m_val) {
		paddr |= uv_hub_info->gnode_upper;
		paddr = ((paddr << uv_hub_info->m_shift)
						>> uv_hub_info->m_shift) |
			((paddr >> uv_hub_info->m_val)
						<< uv_hub_info->n_lshift);
	} else {
		paddr |= uv_soc_phys_ram_to_nasid(paddr)
						<< uv_hub_info->gpa_shift;
	}
	return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}

/* Top two bits indicate the requested address is in MMR space. */
static inline int
uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
	unsigned int m_val = uv_hub_info->m_val;

	if (m_val)
		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}
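
/*
 * Sketch of how the two conversions above are typically paired (the buffer
 * and its use by the GRU are hypothetical):
 *
 *	void *buf = ...;			direct-mapped memory on this node
 *	unsigned long gpa = uv_gpa(buf);	hand to GRU / another node
 *	unsigned long pa  = uv_gpa_to_soc_phys_ram(gpa);
 *	void *back = __va(pa);			back == buf for local RAM
 *
 * i.e. uv_gpa_to_soc_phys_ram() undoes uv_soc_phys_ram_to_gpa() for
 * addresses that refer to this socket's RAM.
 */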

/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	unsigned int n_lshift = uv_hub_info->n_lshift;

	if (n_lshift)
		return gpa >> n_lshift;

	return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	unsigned int m_shift = uv_hub_info->m_shift;

	if (m_shift)
		return (gpa << m_shift) >> m_shift;

	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	unsigned int m_val = uv_hub_info->m_val;
	unsigned long base;
	unsigned short sockid, node, *p2s;

	if (m_val)
		return __va(((unsigned long)pnode << m_val) | offset);

	p2s = uv_hub_info->pnode_to_socket;
	sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
	node = uv_socket_to_node(sockid);

	/* limit address of previous socket is our base, except node 0 is 0 */
	if (!node)
		return __va((unsigned long)offset);

	base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
	return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
	unsigned short *s2pn = uv_hub_info->socket_to_pnode;

	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		       UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}
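
/*
 * Illustrative sketch of the global MMR accessors above (the offset and bit
 * values are hypothetical; real callers use UVH_* offsets from uv_mmrs.h):
 *
 *	int pnode = uv_blade_to_pnode(bid);
 *	unsigned long val = uv_read_global_mmr64(pnode, offset);
 *	uv_write_global_mmr64(pnode, offset, val | some_bit);
 *
 * The 64-bit space reaches MMRs on any hub identified by pnode; the MMR32
 * variants are faster but only cover the MMRs mapped into the 32-bit window.
 */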

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
	return uv_cpu_info_per(cpu)->blade_cpu_id;
}

/* Blade number to Node number (UV2..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
	return blade;
}

/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV2 thru UV4 the node and the blade are identical.
 * .. If this changes then you MUST check references to this function!
 */
static inline int uv_node_to_blade_id(int nid)
{
	return nid;
}

/* Convert a CPU number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_node_to_blade_id(cpu_to_node(cpu));
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_hub_info_list(nid)->pnode;
}

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}
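
/*
 * Sketch of iterating over the blades with the helpers above (the printout
 * is hypothetical, purely for illustration):
 *
 *	int bid;
 *
 *	for_each_possible_blade(bid)
 *		pr_info("blade %d: pnode %d, %d possible cpus\n", bid,
 *			uv_blade_to_pnode(bid),
 *			uv_blade_nr_possible_cpus(bid));
 */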

/* Per Hub NMI support */
extern void uv_nmi_setup(void);
extern void uv_nmi_setup_hubless(void);

/* BIOS/Kernel flags exchange MMR */
#define UVH_BIOS_KERNEL_MMR		UVH_SCRATCH5
#define UVH_BIOS_KERNEL_MMR_ALIAS	UVH_SCRATCH5_ALIAS
#define UVH_BIOS_KERNEL_MMR_ALIAS_2	UVH_SCRATCH5_ALIAS_2

/* TSC sync valid, set by BIOS */
#define UVH_TSC_SYNC_MMR	UVH_BIOS_KERNEL_MMR
#define UVH_TSC_SYNC_SHIFT	10
#define UVH_TSC_SYNC_SHIFT_UV2K	16	/* UV2/3k have different bits */
#define UVH_TSC_SYNC_MASK	3	/* 0011 */
#define UVH_TSC_SYNC_VALID	3	/* 0011 */
#define UVH_TSC_SYNC_UNKNOWN	0	/* 0000 */
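
/*
 * Sketch of decoding the TSC sync state using the definitions above (an
 * illustrative fragment, not the actual boot-time code path):
 *
 *	unsigned long mmr = uv_read_local_mmr(UVH_TSC_SYNC_MMR);
 *	int state = (mmr >> UVH_TSC_SYNC_SHIFT) & UVH_TSC_SYNC_MASK;
 *
 * A state of UVH_TSC_SYNC_VALID means the BIOS has marked the TSCs as
 * synchronized; UVH_TSC_SYNC_UNKNOWN means no such guarantee exists.
 */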

/* The BMC sets a bit in this MMR to non-zero before sending an NMI */
#define UVH_NMI_MMR		UVH_BIOS_KERNEL_MMR
#define UVH_NMI_MMR_CLEAR	UVH_BIOS_KERNEL_MMR_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
	bool		hub_present;	/* false means UV hubless system */
	bool		pch_owner;	/* indicates this hub owns PCH */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	int			state;
	int			pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

#define uv_hub_nmi			this_cpu_read(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)		(per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)		(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define	UV_NMI_STATE_OUT		0
#define	UV_NMI_STATE_IN			1
#define	UV_NMI_STATE_DUMP		2
#define	UV_NMI_STATE_DUMP_DONE		3

/*
 * Get the minimum revision number of the hub chips within the partition.
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */