// SPDX-License-Identifier: GPL-2.0
/*
 * SCLP VT220 terminal driver.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/panic_notifier.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include "sclp.h"
#include "ctrlchar.h"

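/*
 * The tty is registered with TTY_MAJOR (4) and minor start 65, so the first
 * device node is /dev/ttysclp0.
 */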
#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_INDEX	0	/* console=ttysclp0 */

/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;
	struct sclp_req sclp_req;
	int retry_count;
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};

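/*
 * Each write request occupies one page: the SCCB and event buffer header sit
 * at the start of the page, the request structure at its end, and the message
 * text fills the space in between.
 */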
#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))

/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

static struct tty_port sclp_vt220_port;

/* Lock to protect internal data from concurrent access */
static DEFINE_SPINLOCK(sclp_vt220_lock);

/* List of empty pages to be used as write request buffers */
static LIST_HEAD(sclp_vt220_empty);

/* List of pending requests */
static LIST_HEAD(sclp_vt220_outqueue);

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;

static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);

/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
	.send_mask = EVTYP_VT220MSG_MASK,
};

/* Registration structure for SCLP input event buffers */
static struct sclp_register sclp_vt220_register_input = {
	.receive_mask = EVTYP_VT220MSG_MASK,
	.receiver_fn = sclp_vt220_receiver_fn,
};

/*
 * Return the page of the provided request buffer to the list of empty pages
 * and emit pending buffers if necessary.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		if (!request) {
			sclp_vt220_queue_running = 0;
			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	} while (__sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	tty_port_tty_wakeup(&sclp_vt220_port);
}

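/* Maximum number of times a single write request is retried */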
#define SCLP_BUFFER_MAX_RETRY		1

/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action */
	switch (sccb->header.response_code) {
	case 0x0020:
		break;

	case 0x05f0: /* Target resource in improper state */
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040: /* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}

/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sclp_req.status = SCLP_REQ_FILLED;
	request->sclp_req.callback = sclp_vt220_callback;
	request->sclp_req.callback_data = (void *) request;

	return sclp_add_request(&request->sclp_req);
}

/*
 * Queue and emit current request.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request) {
		sccb = (struct sclp_vt220_sccb *)
				sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			list_add_tail(&sclp_vt220_current_request->list,
				      &sclp_vt220_outqueue);
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	if (sclp_vt220_queue_running)
		goto out_unlock;
	if (list_empty(&sclp_vt220_outqueue))
		goto out_unlock;
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);

	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

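/* SCCB header function code used for a normal write request */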
#define SCLP_NORMAL_WRITE	0x00

/*
 * Helper function to initialize a page with the sclp request structure.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
			((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	sccb = (struct sclp_vt220_sccb *) page;
	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
	sccb->header.length = sizeof(struct sclp_vt220_sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);

	return request;
}

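/* Return the number of message bytes that still fit into the request page */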
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
	       sccb->header.length;
}

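/* Return the number of message bytes currently stored in the event buffer */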
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return sccb->evbuf.length - sizeof(struct evbuf_header);
}

/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;

	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	buffer = (void *) ((addr_t) sccb + sccb->header.length);

	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d) */
		for (from = 0, to = 0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;

			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}

/*
 * Emit buffer after having waited long enough for more data to arrive.
 */
static void
sclp_vt220_timeout(struct timer_list *unused)
{
	sclp_vt220_emit_current();
}

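/* Delay (in jiffies) before a partially filled buffer is emitted: HZ/20 = 50ms */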
#define BUFFER_MAX_DELAY	HZ/20

/*
 * Drop the oldest queued write buffer if sclp_console_drop is set.
 */
static int
sclp_vt220_drop_buffer(void)
{
	struct list_head *list;
	struct sclp_vt220_request *request;
	void *page;

	if (!sclp_console_drop)
		return 0;
	list = sclp_vt220_outqueue.next;
	if (sclp_vt220_queue_running)
		/* The first element is in I/O */
		list = list->next;
	if (list == &sclp_vt220_outqueue)
		return 0;
	list_del(list);
	request = list_entry(list, struct sclp_vt220_request, list);
	page = request->sclp_req.sccb;
	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
	return 1;
}

/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. If the data does not fit into
 * the current write buffer, emit the current one and allocate a new one.
 * If there are no more empty buffers available, wait until one gets emptied.
 * If DO_SCHEDULE is non-zero, the buffer will be scheduled for emitting after
 * a timeout - otherwise the user has to explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
 * buffer should be converted to 0x0a 0x0d. If MAY_FAIL is non-zero, return
 * with less than COUNT bytes written instead of waiting for an empty buffer.
 * After completion, return the number of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create an sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			if (list_empty(&sclp_vt220_empty))
				sclp_console_full++;
			while (list_empty(&sclp_vt220_empty)) {
				if (may_fail)
					goto out;
				if (sclp_vt220_drop_buffer())
					break;
				spin_unlock_irqrestore(&sclp_vt220_lock, flags);

				sclp_sync_wait();
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
out:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return overall_written;
}

/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return __sclp_vt220_write(buf, count, 1, 0, 1);
}

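/* First byte of an incoming VT220 event buffer identifies the session event */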
#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00

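/*
 * With CONFIG_MAGIC_SYSRQ, Ctrl-O on the terminal acts as the SysRq trigger:
 * the character following Ctrl-O selects the SysRq action. A repeated Ctrl-O
 * cancels the sequence and passes Ctrl-O through to the tty.
 */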
#ifdef CONFIG_MAGIC_SYSRQ

static int sysrq_pressed;
static struct sysrq_work sysrq;

static void sclp_vt220_reset_session(void)
{
	sysrq_pressed = 0;
}

static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/* Handle magic sys request */
		if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
			/*
			 * If pressed again, reset sysrq_pressed
			 * and flip CTRL-O character
			 */
			sysrq_pressed = !sysrq_pressed;
			if (sysrq_pressed)
				continue;
		} else if (sysrq_pressed) {
			sysrq.key = buffer[i];
			schedule_sysrq_work(&sysrq);
			sysrq_pressed = 0;
			continue;
		}
		tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
	}
}

#else

static void sclp_vt220_reset_session(void)
{
}

static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
	tty_insert_flip_string(&sclp_vt220_port, buffer, count);
}

#endif

/*
 * Called by the SCLP to report incoming event buffers.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	char *buffer;
	unsigned int count;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		sclp_vt220_reset_session();
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		sclp_vt220_handle_input(buffer, count);
		tty_flip_buffer_push(&sclp_vt220_port);
		break;
	}
}

/*
 * This routine is called when a particular tty device is opened.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1) {
		tty_port_tty_set(&sclp_vt220_port, tty);
		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
			tty->winsize.ws_row = 24;
			tty->winsize.ws_col = 80;
		}
	}
	return 0;
}

/*
 * This routine is called when a particular tty device is closed.
 */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1)
		tty_port_tty_set(&sclp_vt220_port, NULL);
}

/*
 * This routine is called by the kernel to write a single
 * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}

/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
	if (!sclp_vt220_queue_running)
		sclp_vt220_emit_current();
	else
		sclp_vt220_flush_later = 1;
}

/*
 * This routine returns the number of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if output flow
 * control is asserted.
 */
static unsigned int
sclp_vt220_write_room(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	unsigned int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_space_left(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_empty)
		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Return number of buffered chars.
 */
static unsigned int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_vt220_request *r;
	unsigned int count = 0;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_outqueue) {
		r = list_entry(l, struct sclp_vt220_request, list);
		count += sclp_vt220_chars_stored(r);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Pass on all buffers to the hardware.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	sclp_vt220_emit_current();
}

/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
	struct list_head *page, *p;

	list_for_each_safe(page, p, &sclp_vt220_empty) {
		list_del(page);
		free_page((unsigned long) page);
	}
}

/* Release memory and unregister from sclp core. Controlled by init counting -
 * only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
	sclp_vt220_init_count--;
	if (sclp_vt220_init_count != 0)
		return;
	sclp_unregister(&sclp_vt220_register);
	__sclp_vt220_free_pages();
	tty_port_destroy(&sclp_vt220_port);
}

/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
	void *page;
	int i;
	int rc;

	sclp_vt220_init_count++;
	if (sclp_vt220_init_count != 1)
		return 0;
	timer_setup(&sclp_vt220_timer, sclp_vt220_timeout, 0);
	tty_port_init(&sclp_vt220_port);
	sclp_vt220_current_request = NULL;
	sclp_vt220_buffered_chars = 0;
	sclp_vt220_flush_later = 0;

	/* Allocate pages for output buffering */
	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;
		list_add_tail(page, &sclp_vt220_empty);
	}
	rc = sclp_register(&sclp_vt220_register);
out:
	if (rc) {
		__sclp_vt220_free_pages();
		sclp_vt220_init_count--;
		tty_port_destroy(&sclp_vt220_port);
	}
	return rc;
}

static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer,
};

/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 */
static int __init sclp_vt220_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
	 * symmetry between VM and LPAR systems regarding ttyS1. */
	driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
	if (IS_ERR(driver))
		return PTR_ERR(driver);
	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
	if (rc)
		goto out_driver;

	driver->driver_name = SCLP_VT220_DRIVER_NAME;
	driver->name = SCLP_VT220_DEVICE_NAME;
	driver->major = SCLP_VT220_MAJOR;
	driver->minor_start = SCLP_VT220_MINOR;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, &sclp_vt220_ops);
	tty_port_link_device(&sclp_vt220_port, driver, 0);

	rc = tty_register_driver(driver);
	if (rc)
		goto out_init;
	rc = sclp_register(&sclp_vt220_register_input);
	if (rc)
		goto out_reg;
	sclp_vt220_driver = driver;
	return 0;

out_reg:
	tty_unregister_driver(driver);
out_init:
	__sclp_vt220_cleanup();
out_driver:
	tty_driver_kref_put(driver);
	return rc;
}
__initcall(sclp_vt220_tty_init);

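/*
 * Emit the current buffer and wait until all queued write requests have been
 * processed. Called from the panic and reboot notifiers.
 */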
static void __sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_queue_running) {
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

#ifdef CONFIG_SCLP_VT220_CONSOLE

static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}

static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}

static int
sclp_vt220_notify(struct notifier_block *self,
		  unsigned long event, void *data)
{
	__sclp_vt220_flush_buffer();
	return NOTIFY_OK;
}

static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.flags = CON_PRINTBUFFER,
	.index = SCLP_VT220_CONSOLE_INDEX
};

static int __init
sclp_vt220_con_init(void)
{
	int rc;

	rc = __sclp_vt220_init(sclp_console_pages);
	if (rc)
		return rc;
	/* Attach linux console */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_vt220_console);
	return 0;
}

console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */