/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
 * dump format as s390 standalone dumps.
 *
 * For more information please refer to Documentation/s390/zfcpdump.txt
 *
 * Copyright IBM Corp. 2003,2007
 * Author(s): Michael Holzheu
 */
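
/*
 * Typical usage: with debugfs mounted (usually at /sys/kernel/debug), the
 * dump can be copied from the "zcore/mem" file, for example with dd or cat.
 * See the zfcpdump documentation referenced above for the full procedure.
 */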

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/debugfs.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include "sclp.h"

#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

#define TO_USER		0
#define TO_KERNEL	1
#define CHUNK_INFO_SIZE	34 /* 2 16-char hex numbers, each followed by a blank */

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info */

struct sys_info {
	enum arch_id	arch;
	unsigned long	sa_base;
	u32		sa_size;
	int		cpu_map[NR_CPUS];
	unsigned long	mem_size;
	union save_area	lc_mask;
};

static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;

/*
 * Copy memory from HSA to kernel or user memory (not reentrant):
 *
 * @dest:  Kernel or user buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 * @mode:  Either TO_KERNEL or TO_USER
 */
static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (count == 0)
		return 0;

	/* copy first block */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}

static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}

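/*
 * Copy memory from real (absolute) storage to a kernel buffer using an MVCLE
 * loop. DAT and interrupts are disabled for the duration of the copy
 * ("real mode").
 *
 * @dest:  Kernel buffer where memory should be copied to
 * @src:   Start address in absolute memory
 * @count: Number of bytes to copy
 */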
static int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long flags;
	int rc = -EFAULT;
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = src;
	register unsigned long _len2 asm("5") = (unsigned long) count;

	if (count == 0)
		return 0;
	flags = __raw_local_irq_stnsm(0xf8UL); /* switch to real mode */
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long*)dest))
		: "m" (*((long*)src))
		: "cc", "memory");
	__raw_local_irq_ssm(flags);

	return rc;
}

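/*
 * Copy memory from real (absolute) storage to user space, chunk-wise via a
 * static bounce buffer (not reentrant).
 *
 * @dest:  User buffer where memory should be copied to
 * @src:   Start address in absolute memory
 * @count: Number of bytes to copy
 */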
static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
{
	static char buf[4096];
	int offs = 0, size;

	while (offs < count) {
		size = min(sizeof(buf), count - offs);
		if (memcpy_real(buf, src + offs, size))
			return -EFAULT;
		if (copy_to_user(dest + offs, buf, size))
			return -EFAULT;
		offs += size;
	}
	return 0;
}

#ifdef __s390x__
/*
 * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info
 */
static void __init s390x_to_s390_regs(union save_area *out, union save_area *in,
				      int cpu)
{
	int i;

	for (i = 0; i < 16; i++) {
		out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff;
		out->s390.acc_regs[i] = in->s390x.acc_regs[i];
		out->s390.ctrl_regs[i] =
			in->s390x.ctrl_regs[i] & 0x00000000ffffffff;
	}
	/* lowcore for 31 bit has only space for fpregs 0,2,4,6 */
	out->s390.fp_regs[0] = in->s390x.fp_regs[0];
	out->s390.fp_regs[1] = in->s390x.fp_regs[2];
	out->s390.fp_regs[2] = in->s390x.fp_regs[4];
	out->s390.fp_regs[3] = in->s390x.fp_regs[6];
	memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4);
	out->s390.psw[1] |= 0x8; /* set bit 12 */
	memcpy(&(out->s390.psw[4]), &(in->s390x.psw[12]), 4);
	out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */
	out->s390.pref_reg = in->s390x.pref_reg;
	out->s390.timer = in->s390x.timer;
	out->s390.clk_cmp = in->s390x.clk_cmp;
}

static void __init s390x_to_s390_save_areas(void)
{
	int i = 1;
	static union save_area tmp;

	while (zfcpdump_save_areas[i]) {
		s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i);
		memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp));
		i++;
	}
}

#endif /* __s390x__ */

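/*
 * Read the register save area of the boot cpu from the HSA and store it as
 * the first entry of zfcpdump_save_areas[].
 */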
static int __init init_cpu_info(enum arch_id arch)
{
	union save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */

	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		kfree(sa);
		return -EIO;
	}
	zfcpdump_save_areas[0] = sa;

#ifdef __s390x__
	/* convert s390x regs to s390, if we are dumping an s390 Linux */

	if (arch == ARCH_S390)
		s390x_to_s390_save_areas();
#endif

	return 0;
}

static DEFINE_MUTEX(zcore_mutex);

#define DUMP_VERSION	0x3
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096
/* dump header, laid out according to the s390 crash dump format */

struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;
	cpuid_t cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	char pad2[4016];
} __attribute__((packed, __aligned__(16)));

static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef __s390x__
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};

/*
 * Copy lowcore info to buffer. Use map in order to copy only register parts.
 *
 * @buf:    User buffer
 * @sa:     Pointer to save area
 * @sa_off: Offset in save area to copy
 * @len:    Number of bytes to copy
 */
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
	int i;
	char *lc_mask = (char*)&sys_info.lc_mask;

	for (i = 0; i < len; i++) {
		if (!lc_mask[i + sa_off])
			continue;
		if (copy_to_user(buf + i, sa + sa_off + i, 1))
			return -EFAULT;
	}
	return 0;
}

/*
 * Copy lowcore register info into the user buffer, if necessary
 *
 * @buf:   User buffer
 * @start: Start address of buffer in dump memory
 * @count: Size of buffer
 */
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
	unsigned long end;
	int i = 0;

	if (count == 0)
		return 0;

	end = start + count;
	while (zfcpdump_save_areas[i]) {
		unsigned long cp_start, cp_end; /* copy range */
		unsigned long sa_start, sa_end; /* save area range */
		unsigned long prefix;
		unsigned long sa_off, len, buf_off;

		if (sys_info.arch == ARCH_S390)
			prefix = zfcpdump_save_areas[i]->s390.pref_reg;
		else
			prefix = zfcpdump_save_areas[i]->s390x.pref_reg;

		sa_start = prefix + sys_info.sa_base;
		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;

		if ((end < sa_start) || (start > sa_end))
			goto next;
		cp_start = max(start, sa_start);
		cp_end = min(end, sa_end);

		buf_off = cp_start - start;
		sa_off = cp_start - sa_start;
		len = cp_end - cp_start;

		TRACE("copy_lc for: %lx\n", start);
		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
			return -EFAULT;
next:
		i++;
	}
	return 0;
}

/*
 * Read routine for the zcore character device.
 * The first 4K are the dump header, the next 32 MB are HSA memory and the
 * rest is read from absolute memory.
 */
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long mem_start; /* Start address in memory */
	size_t mem_offs;	 /* Offset in dump memory */
	size_t hdr_count;	 /* Size of header part of output buffer */
	size_t size;
	int rc;

	mutex_lock(&zcore_mutex);

	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
		rc = -EINVAL;
		goto fail;
	}

	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));

	/* Copy dump header */
	if (*ppos < HEADER_SIZE) {
		size = min(count, (size_t) (HEADER_SIZE - *ppos));
		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
			rc = -EFAULT;
			goto fail;
		}
		hdr_count = size;
		mem_start = 0;
	} else {
		hdr_count = 0;
		mem_start = *ppos - HEADER_SIZE;
	}

	mem_offs = 0;

	/* Copy from HSA data */
	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
			   - mem_start));
		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
		if (rc)
			goto fail;

		mem_offs += size;
	}

	/* Copy from real mem */
	size = count - mem_offs - hdr_count;
	rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
			      size);
	if (rc)
		goto fail;

	/*
	 * Since s390 dump analysis tools like lcrash or crash
	 * expect register sets in the prefix pages of the cpus,
	 * we copy them into the read buffer, if necessary.
	 * buf + hdr_count: Start of memory part of output buffer
	 * mem_start: Start memory address to copy from
	 * count - hdr_count: Size of memory area to copy
	 */
	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
		rc = -EFAULT;
		goto fail;
	}
	*ppos += count;
fail:
	mutex_unlock(&zcore_mutex);
	return (rc < 0) ? rc : count;
}

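/*
 * Open the dump file: requires CAP_SYS_RAWIO and fails with -ENODATA once
 * the HSA has been released.
 */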
static int zcore_open(struct inode *inode, struct file *filp)
{
	if (!hsa_available)
		return -ENODATA;
	else
		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

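/*
 * Release the HSA on close; subsequent opens will fail with -ENODATA.
 */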
static int zcore_release(struct inode *inode, struct file *filep)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
	return 0;
}

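/*
 * Seek in the dump file; only SEEK_SET (0) and SEEK_CUR (1) are supported.
 */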
static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t rc;

	mutex_lock(&zcore_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		rc = file->f_pos;
		break;
	case 1:
		file->f_pos += offset;
		rc = file->f_pos;
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&zcore_mutex);
	return rc;
}

static const struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};

static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
}

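/*
 * Build a text representation of the memory layout: for each memory chunk
 * the start address and size are printed as 16-digit hex numbers, each
 * followed by a blank. The buffer is stored in private_data for
 * zcore_memmap_read().
 */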
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
	int i;
	char *buf;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
	if (!buf) {
		kfree(chunk_array);
		return -ENOMEM;
	}
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
			(unsigned long long) chunk_array[i].addr,
			(unsigned long long) chunk_array[i].size);
		if (chunk_array[i].size == 0)
			break;
	}
	kfree(chunk_array);
	filp->private_data = buf;
	return 0;
}

static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations zcore_memmap_fops = {
	.owner		= THIS_MODULE,
	.read		= zcore_memmap_read,
	.open		= zcore_memmap_open,
	.release	= zcore_memmap_release,
};

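/*
 * Mark all bytes of the register fields in the 31 bit save area layout as
 * relevant; copy_lc() only copies bytes with a non-zero mask.
 */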
static void __init set_s390_lc_mask(union save_area *map)
{
	memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save));
	memset(&map->s390.timer, 0xff, sizeof(map->s390.timer));
	memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp));
	memset(&map->s390.psw, 0xff, sizeof(map->s390.psw));
	memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg));
	memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs));
	memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs));
	memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs));
	memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs));
}

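/*
 * Same as set_s390_lc_mask(), but for the 64 bit save area layout.
 */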
static void __init set_s390x_lc_mask(union save_area *map)
{
	memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs));
	memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs));
	memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw));
	memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg));
	memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg));
	memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg));
	memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer));
	memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp));
	memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs));
	memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs));
}

/*
 * Initialize dump globals for a given architecture
 */
static int __init sys_info_init(enum arch_id arch)
{
	int rc;

	switch (arch) {
	case ARCH_S390X:
		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
		sys_info.sa_base = SAVE_AREA_BASE_S390X;
		sys_info.sa_size = sizeof(struct save_area_s390x);
		set_s390x_lc_mask(&sys_info.lc_mask);
		break;
	case ARCH_S390:
		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
		sys_info.sa_base = SAVE_AREA_BASE_S390;
		sys_info.sa_size = sizeof(struct save_area_s390);
		set_s390_lc_mask(&sys_info.lc_mask);
		break;
	default:
		pr_alert("0x%x is an unknown architecture.\n", arch);
		return -EINVAL;
	}
	sys_info.arch = arch;
	rc = init_cpu_info(arch);
	if (rc)
		return rc;
	sys_info.mem_size = real_memory_size;

	return 0;
}

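/*
 * Check that the SDIAS block count can be determined and that the HSA is at
 * least ZFCPDUMP_HSA_SIZE bytes large.
 */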
static int __init check_sdias(void)
{
	int rc, act_hsa_size;

	rc = sclp_sdias_blk_count();
	if (rc < 0) {
		TRACE("Could not determine HSA size\n");
		return rc;
	}
	act_hsa_size = (rc - 1) * PAGE_SIZE;
	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
		TRACE("HSA size too small: %i\n", act_hsa_size);
		return -EINVAL;
	}
	return 0;
}

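/*
 * Determine the memory size by adding up the sizes of all detected memory
 * chunks.
 */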
static int __init get_mem_size(unsigned long *mem)
{
	int i;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk_array[i].size == 0)
			break;
		*mem += chunk_array[i].size;
	}
	kfree(chunk_array);
	return 0;
}

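/*
 * Fill in the architecture, memory size, TOD clock and cpu id fields of the
 * dump header.
 */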
static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
	int rc;
	unsigned long memory = 0;

	if (arch == ARCH_S390X)
		hdr->arch_id = DUMP_ARCH_S390X;
	else
		hdr->arch_id = DUMP_ARCH_S390;
	rc = get_mem_size(&memory);
	if (rc)
		return rc;
	hdr->mem_size = memory;
	hdr->rmem_size = memory;
	hdr->mem_end = sys_info.mem_size;
	hdr->num_pages = memory / PAGE_SIZE;
	hdr->tod = get_clock();
	get_cpu_id(&hdr->cpu_id);
	return 0;
}

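/*
 * Module initialization: only active after an FCP dump IPL. Sets up the
 * debug facility, the SDIAS interface, the dump header and the debugfs
 * files "zcore/mem" and "zcore/memmap".
 */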
static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	TRACE("devno:  %x\n", ipl_info.data.fcp.dev_id.devno);
	TRACE("wwpn:   %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
	TRACE("lun:    %llx\n", (unsigned long long) ipl_info.data.fcp.lun);

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

#ifndef __s390x__
	if (arch == ARCH_S390X) {
		pr_alert("The 32-bit dump tool cannot be used for a "
			 "64-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#endif

	rc = sys_info_init(arch);
	if (rc)
		goto fail;

	rc = zcore_header_init(arch, &zcore_header);
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	if (!zcore_dir) {
		rc = -ENOMEM;
		goto fail;
	}
	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
					 &zcore_fops);
	if (!zcore_file) {
		rc = -ENOMEM;
		goto fail_dir;
	}
	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
						NULL, &zcore_memmap_fops);
	if (!zcore_memmap_file) {
		rc = -ENOMEM;
		goto fail_file;
	}
	hsa_available = 1;
	return 0;

fail_file:
	debugfs_remove(zcore_file);
fail_dir:
	debugfs_remove(zcore_dir);
fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	diag308(DIAG308_REL_HSA, NULL);
}

MODULE_AUTHOR("Copyright IBM Corp. 2003,2007");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");

subsys_initcall(zcore_init);
module_exit(zcore_exit);