/*
 *  drivers/s390/char/sclp_cmd.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

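/*
 * Response block for the read SCP info commands; the field comments
 * give byte offsets within the SCCB.
 */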
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

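/*
 * Issue an SCLP command synchronously during early boot, before the
 * interrupt-driven SCLP machinery is available: enable the
 * service-signal subclass mask (control register 0, bit 9), start the
 * service call and wait for the completion interrupt in an enabled
 * wait PSW.
 */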
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

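/*
 * Try the forced read SCP info command first and fall back to the
 * plain variant when it is rejected with response code 0x01f0. -EBUSY
 * results are retried; response code 0x0010 marks the buffer as valid.
 */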
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

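/*
 * Read the SCP info block once during early boot and cache the SCLP
 * facility bits, the highest storage increment number (rnmax) and the
 * storage increment size (rzm), converted to bytes.
 */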
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

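/*
 * Queue an SCLP request and block until its callback fires, turning
 * the asynchronous sclp_add_request() interface into a synchronous one.
 */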
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

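/*
 * Copy the CPU entries from the response buffer into *info. Copying
 * info->combined entries starting at offset_configured relies on the
 * standby entries directly following the configured ones.
 */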
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

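/**
 * sclp_get_cpu_info - perform read cpu info sclp command
 * @info: buffer receiving the configured and standby cpu counts and entries
 *
 * Return 0 after the command finished successfully, non-zero otherwise.
 */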
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

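/*
 * Issue a (de)configure CPU sclp command. Response codes 0x0020 and
 * 0x0120 are treated as success, everything else as -EIO.
 */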
static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

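/**
 * sclp_cpu_configure - perform configure cpu sclp command
 * @cpu: address of the cpu to configure
 *
 * Return 0 after the command finished successfully, non-zero otherwise.
 */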
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

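/**
 * sclp_cpu_deconfigure - perform deconfigure cpu sclp command
 * @cpu: address of the cpu to deconfigure
 *
 * Return 0 after the command finished successfully, non-zero otherwise.
 */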
int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];

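/*
 * One entry per storage increment: rn is the 1-based increment number,
 * standby marks increments that are not initially assigned, and
 * usecount counts the online memory regions overlapping the increment.
 */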
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

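/*
 * Increment numbers are 1-based; increment rn covers the physical
 * address range [(rn - 1) * rzm, rn * rzm).
 */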
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

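/*
 * Issue an assign or unassign storage sclp command for the given
 * increment number. As with CPU reconfiguration, response codes
 * 0x0020 and 0x0120 count as success.
 */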
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

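/*
 * Attach the storage element with the given id and unassign every
 * increment reported for it, leaving the increments to be assigned
 * individually once the corresponding memory goes online.
 */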
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++)
			sclp_unassign_storage(sccb->entries[i] >> 16);
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

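/*
 * Assign (online) or unassign (offline) all storage increments that
 * overlap the address range [start, start + size). The per-increment
 * usecount lets overlapping memory sections share an increment safely.
 */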
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

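/*
 * Memory hotplug notifier: attach any storage elements that appeared
 * since the last call, then assign standby storage on MEM_GOING_ONLINE
 * and unassign it again on MEM_OFFLINE or MEM_CANCEL_ONLINE.
 */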
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for (id = 0; id <= sclp_max_storage_id; id++)
		if (!test_bit(id, sclp_storage_ids))
			sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

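/*
 * Coalesce runs of consecutive increment numbers into a single
 * add_memory() call. State is kept in static variables between calls;
 * passing rn == 0 flushes the pending range.
 */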
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

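/*
 * Insert an increment into sclp_mem_list, kept sorted by rn. For
 * unassigned (standby) increments no rn is reported by the hardware;
 * the first gap in the existing numbering is used instead.
 */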
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

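/*
 * Probe each storage element with the read storage information command
 * and build the increment list: response 0x0010 reports assigned
 * increments, 0x0410 standby increments. Whatever remains below rnmax
 * is inserted as unassigned standby memory. Finally register the
 * memory notifier and add the standby memory. Runs once via __initcall.
 */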
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

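/*
 * Issue a (de)configure channel-path sclp command. Besides 0x0020 and
 * 0x0120, the channel-path variants also accept 0x0440 and 0x0450 as
 * success responses.
 */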
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified
 * chpid and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}