// SPDX-License-Identifier: GPL-2.0
/*
 * SCLP VT220 terminal driver.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include "sclp.h"
#include "ctrlchar.h"

#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttyS"
#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */

/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;
	struct sclp_req sclp_req;
	int retry_count;
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};

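/* Each output buffer occupies one page: the SCCB (header plus event buffer)
 * sits at the start of the page, the message text follows it, and the
 * struct sclp_vt220_request bookkeeping data is placed at the very end of
 * the page (see sclp_vt220_initialize_page()). */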
#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))

/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

static struct tty_port sclp_vt220_port;

/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;

/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;

/* List of pending requests */
static struct list_head sclp_vt220_outqueue;

/* Suspend mode flag */
static int sclp_vt220_suspended;

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;

static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);

/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
	.send_mask = EVTYP_VT220MSG_MASK,
	.pm_event_fn = sclp_vt220_pm_event_fn,
};

/* Registration structure for SCLP input event buffers */
static struct sclp_register sclp_vt220_register_input = {
	.receive_mask = EVTYP_VT220MSG_MASK,
	.receiver_fn = sclp_vt220_receiver_fn,
};


/*
 * Put provided request buffer back into queue and check emit pending
 * buffers if necessary.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		if (!request || sclp_vt220_suspended) {
			sclp_vt220_queue_running = 0;
			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	} while (__sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	tty_port_tty_wakeup(&sclp_vt220_port);
}

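/* Retry a write request that failed with an SCLP equipment check at most
 * this many times before giving up on it (see sclp_vt220_callback()). */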
#define SCLP_BUFFER_MAX_RETRY		1

/*
 * Callback through which the result of a write request is reported by the
 * SCLP.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action */
	switch (sccb->header.response_code) {
	case 0x0020:
		break;

	case 0x05f0: /* Target resource in improper state */
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040: /* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}

/*
 * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
 * otherwise.
 */
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sclp_req.status = SCLP_REQ_FILLED;
	request->sclp_req.callback = sclp_vt220_callback;
	request->sclp_req.callback_data = (void *) request;

	return sclp_add_request(&request->sclp_req);
}

/*
 * Queue and emit current request.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request) {
		sccb = (struct sclp_vt220_sccb *)
				sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			list_add_tail(&sclp_vt220_current_request->list,
				      &sclp_vt220_outqueue);
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	if (sclp_vt220_queue_running || sclp_vt220_suspended)
		goto out_unlock;
	if (list_empty(&sclp_vt220_outqueue))
		goto out_unlock;
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);

	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

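/* SCCB function code used when building a write request buffer */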
#define SCLP_NORMAL_WRITE		0x00

/*
 * Helper function to initialize a page with the sclp request structure.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
			((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	sccb = (struct sclp_vt220_sccb *) page;
	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
	sccb->header.length = sizeof(struct sclp_vt220_sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);

	return request;
}

static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
	       sccb->header.length;
}

static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb;
	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	return sccb->evbuf.length - sizeof(struct evbuf_header);
}

/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;

	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	buffer = (void *) ((addr_t) sccb + sccb->header.length);

	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d) */
		for (from = 0, to = 0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;

			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}

/*
 * Emit buffer after having waited long enough for more data to arrive.
 */
static void
sclp_vt220_timeout(unsigned long data)
{
	sclp_vt220_emit_current();
}

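/* Maximum time (in jiffies) that a partially filled buffer is held back
 * before it is emitted; HZ/20 corresponds to 50 milliseconds. */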
#define BUFFER_MAX_DELAY	HZ/20

/*
 * Drop oldest console buffer if sclp_con_drop is set
 */
static int
sclp_vt220_drop_buffer(void)
{
	struct list_head *list;
	struct sclp_vt220_request *request;
	void *page;

	if (!sclp_console_drop)
		return 0;
	list = sclp_vt220_outqueue.next;
	if (sclp_vt220_queue_running)
		/* The first element is in I/O */
		list = list->next;
	if (list == &sclp_vt220_outqueue)
		return 0;
	list_del(list);
	request = list_entry(list, struct sclp_vt220_request, list);
	page = request->sclp_req.sccb;
	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
	return 1;
}

/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF to the SCLP interface. In case that the data does not
 * fit into the current write buffer, emit the current one and allocate a
 * new one. If there are no more empty buffers available, wait until one
 * gets emptied. If DO_SCHEDULE is non-zero, the buffer will be scheduled
 * for emitting after a timeout - otherwise the user has to explicitly call
 * the flush function. A non-zero CONVERTLF parameter indicates that 0x0a
 * characters in the message buffer should be converted to 0x0a 0x0d. After
 * completion, return the number of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create an sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			if (list_empty(&sclp_vt220_empty))
				sclp_console_full++;
			while (list_empty(&sclp_vt220_empty)) {
				if (may_fail || sclp_vt220_suspended)
					goto out;
				if (sclp_vt220_drop_buffer())
					break;
				spin_unlock_irqrestore(&sclp_vt220_lock, flags);

				sclp_sync_wait();
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.function = sclp_vt220_timeout;
		sclp_vt220_timer.data = 0UL;
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
out:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return overall_written;
}

/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return __sclp_vt220_write(buf, count, 1, 0, 1);
}

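/* The first byte of an incoming VT220 event buffer identifies the type of
 * session event; for SESSION_DATA the remaining bytes carry terminal input
 * (see sclp_vt220_receiver_fn()). */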
#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00

#ifdef CONFIG_MAGIC_SYSRQ

static int sysrq_pressed;
static struct sysrq_work sysrq;

static void sclp_vt220_reset_session(void)
{
	sysrq_pressed = 0;
}

static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/* Handle magic sys request */
		if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
			/*
			 * If pressed again, reset sysrq_pressed
			 * and flip CTRL-O character
			 */
			sysrq_pressed = !sysrq_pressed;
			if (sysrq_pressed)
				continue;
		} else if (sysrq_pressed) {
			sysrq.key = buffer[i];
			schedule_sysrq_work(&sysrq);
			sysrq_pressed = 0;
			continue;
		}
		tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
	}
}

#else

static void sclp_vt220_reset_session(void)
{
}

static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
{
	tty_insert_flip_string(&sclp_vt220_port, buffer, count);
}

#endif

/*
 * Called by the SCLP to report incoming event buffers.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	char *buffer;
	unsigned int count;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		sclp_vt220_reset_session();
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		sclp_vt220_handle_input(buffer, count);
		tty_flip_buffer_push(&sclp_vt220_port);
		break;
	}
}

/*
 * This routine is called when a particular tty device is opened.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1) {
		tty_port_tty_set(&sclp_vt220_port, tty);
		sclp_vt220_port.low_latency = 0;
		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
			tty->winsize.ws_row = 24;
			tty->winsize.ws_col = 80;
		}
	}
	return 0;
}

/*
 * This routine is called when a particular tty device is closed.
 */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count == 1)
		tty_port_tty_set(&sclp_vt220_port, NULL);
}

/*
 * This routine is called by the kernel to write a single
 * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}

/*
 * This routine is called by the kernel after it has written a
 * series of characters to the tty device using put_char().
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
	if (!sclp_vt220_queue_running)
		sclp_vt220_emit_current();
	else
		sclp_vt220_flush_later = 1;
}

/*
 * This routine returns the numbers of characters the tty driver
 * will accept for queuing to be written. This number is subject
 * to change as output buffers get emptied, or if the output flow
 * control is acted.
 */
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_space_left(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_empty)
		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Return number of buffered chars.
 */
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct list_head *l;
	struct sclp_vt220_request *r;
	int count;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	count = 0;
	if (sclp_vt220_current_request != NULL)
		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
	list_for_each(l, &sclp_vt220_outqueue) {
		r = list_entry(l, struct sclp_vt220_request, list);
		count += sclp_vt220_chars_stored(r);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return count;
}

/*
 * Pass on all buffers to the hardware. Return only when there are no more
 * buffers pending.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	sclp_vt220_emit_current();
}

/* Release allocated pages. */
static void __init __sclp_vt220_free_pages(void)
{
	struct list_head *page, *p;

	list_for_each_safe(page, p, &sclp_vt220_empty) {
		list_del(page);
		free_page((unsigned long) page);
	}
}

/* Release memory and unregister from sclp core. Controlled by init counting -
 * only the last invoker will actually perform these actions. */
static void __init __sclp_vt220_cleanup(void)
{
	sclp_vt220_init_count--;
	if (sclp_vt220_init_count != 0)
		return;
	sclp_unregister(&sclp_vt220_register);
	__sclp_vt220_free_pages();
	tty_port_destroy(&sclp_vt220_port);
}

/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions. */
static int __init __sclp_vt220_init(int num_pages)
{
	void *page;
	int i;
	int rc;

	sclp_vt220_init_count++;
	if (sclp_vt220_init_count != 1)
		return 0;
	spin_lock_init(&sclp_vt220_lock);
	INIT_LIST_HEAD(&sclp_vt220_empty);
	INIT_LIST_HEAD(&sclp_vt220_outqueue);
	init_timer(&sclp_vt220_timer);
	tty_port_init(&sclp_vt220_port);
	sclp_vt220_current_request = NULL;
	sclp_vt220_buffered_chars = 0;
	sclp_vt220_flush_later = 0;

	/* Allocate pages for output buffering */
	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;
		list_add_tail(page, &sclp_vt220_empty);
	}
	rc = sclp_register(&sclp_vt220_register);
out:
	if (rc) {
		__sclp_vt220_free_pages();
		sclp_vt220_init_count--;
		tty_port_destroy(&sclp_vt220_port);
	}
	return rc;
}

static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer,
};

/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 */
static int __init sclp_vt220_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
	 * symmetry between VM and LPAR systems regarding ttyS1. */
	driver = alloc_tty_driver(1);
	if (!driver)
		return -ENOMEM;
	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
	if (rc)
		goto out_driver;

	driver->driver_name = SCLP_VT220_DRIVER_NAME;
	driver->name = SCLP_VT220_DEVICE_NAME;
	driver->major = SCLP_VT220_MAJOR;
	driver->minor_start = SCLP_VT220_MINOR;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(driver, &sclp_vt220_ops);
	tty_port_link_device(&sclp_vt220_port, driver, 0);

	rc = tty_register_driver(driver);
	if (rc)
		goto out_init;
	rc = sclp_register(&sclp_vt220_register_input);
	if (rc)
		goto out_reg;
	sclp_vt220_driver = driver;
	return 0;

out_reg:
	tty_unregister_driver(driver);
out_init:
	__sclp_vt220_cleanup();
out_driver:
	put_tty_driver(driver);
	return rc;
}
__initcall(sclp_vt220_tty_init);

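/* Emit any remaining buffered output and busy-wait until the request queue
 * has drained. Used on suspend and, when the console is enabled, from the
 * panic and reboot notifiers. */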
static void __sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_queue_running) {
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}

/*
 * Resume console: If there are cached messages, emit them.
 */
static void sclp_vt220_resume(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 0;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	sclp_vt220_emit_current();
}

/*
 * Suspend console: Set suspend flag and flush console
 */
static void sclp_vt220_suspend(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	__sclp_vt220_flush_buffer();
}

static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		sclp_vt220_suspend();
		break;
	case SCLP_PM_EVENT_RESTORE:
	case SCLP_PM_EVENT_THAW:
		sclp_vt220_resume();
		break;
	}
}

#ifdef CONFIG_SCLP_VT220_CONSOLE

static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}

static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}

static int
sclp_vt220_notify(struct notifier_block *self,
		  unsigned long event, void *data)
{
	__sclp_vt220_flush_buffer();
	return NOTIFY_OK;
}

static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.flags = CON_PRINTBUFFER,
	.index = SCLP_VT220_CONSOLE_INDEX
};

static int __init
sclp_vt220_con_init(void)
{
	int rc;

	rc = __sclp_vt220_init(sclp_console_pages);
	if (rc)
		return rc;
	/* Attach linux console */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_vt220_console);
	return 0;
}

console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */