// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 */

/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */

#undef DEBUG

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#include <asm/hvsi.h>

#define HVSI_MAJOR 229
#define HVSI_MINOR 128
#define MAX_NR_HVSI_CONSOLES 4

#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12

/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))

struct hvsi_struct {
	struct tty_port port;
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	spinlock_t lock;
	int index;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	uint8_t *inbuf_end;
	int n_throttle;
	int n_outbuf;
	uint32_t vtermno;
	uint32_t virq;
	atomic_t seqno; /* HVSI packet sequence number */
	uint16_t mctrl;
	uint8_t state; /* HVSI protocol state */
	uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
	uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);

enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}

static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}

static inline void print_state(struct hvsi_struct *hp)
{
#ifdef DEBUG
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = (hp->state < ARRAY_SIZE(state_names))
		? state_names[hp->state] : "UNKNOWN";

	pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
}

static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}

static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}

static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}

static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}

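/* have we received a complete packet, starting at 'packet', in inbuf? */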
static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */

	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */

	return 1;
}

/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);

	pr_debug("%s: %i chars remain\n", __func__, remaining);

	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);

	hp->inbuf_end = hp->inbuf + remaining;
}

#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif

static void dump_hex(const uint8_t *data, int len)
{
	int i;

	printk("  ");
	for (i=0; i < len; i++)
		printk("%.2x", data[i]);

	printk("\n  ");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}

static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;

	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
			header->seqno);

	dump_hex(packet, header->len);
}

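/* read up to 'count' bytes from the hypervisor into 'buf' */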
static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long got;

	got = hvc_get_chars(hp->vtermno, buf, count);

	return got;
}

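/*
 * handle an incoming control packet: a modem-control update (a dropped CD
 * hangs up the tty unless CLOCAL is set), or a close/reset notification from
 * the FSP, which requests a re-handshake via *to_handshake.
 */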
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct *tty, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;

	switch (be16_to_cpu(header->verb)) {
	case VSV_MODEM_CTL_UPDATE:
		if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
			/* CD went away; no more connection */
			pr_debug("hvsi%i: CD dropped\n", hp->index);
			hp->mctrl &= TIOCM_CD;
			if (tty && !C_CLOCAL(tty))
				tty_hangup(tty);
		}
		break;
	case VSV_CLOSE_PROTOCOL:
		pr_debug("hvsi%i: service processor came back\n", hp->index);
		if (hp->state != HVSI_CLOSED) {
			*to_handshake = hp;
		}
		break;
	default:
		printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
		dump_packet(packet);
		break;
	}
}

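/* handle a query response: advances the version handshake or records the modem control bits */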
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
	uint32_t mctrl_word;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_RESPONSE:
		__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
		break;
	case HVSI_WAIT_FOR_MCTRL_RESPONSE:
		hp->mctrl = 0;
		mctrl_word = be32_to_cpu(resp->u.mctrl_word);
		if (mctrl_word & HVSI_TSDTR)
			hp->mctrl |= TIOCM_DTR;
		if (mctrl_word & HVSI_TSCD)
			hp->mctrl |= TIOCM_CD;
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
		dump_packet(packet);
		break;
	}
}

/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query_response);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = cpu_to_be16(query_seqno+1);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}

	return 0;
}

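/* handle a query from the service processor; only the version query is expected, during handshaking */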
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_QUERY:
		hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
		dump_packet(packet);
		break;
	}
}

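/* push received characters into the tty flip buffer; a NUL marks the start of a sysrq sequence */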
static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;

	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(&hp->port, c, 0);
	}
}

/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * revisited.
 */
#define TTY_THRESHOLD_THROTTLE 128
static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;

	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);

	if (datalen == 0)
		return false;

	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}

	hvsi_insert_chars(hp, data, datalen);

	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __func__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}

	return true;
}

/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
		struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;
	bool flip = false;

	*handshake = NULL;

	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __func__);
		return 0;
	}

	pr_debug("%s: got %i bytes\n", __func__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);

	hp->inbuf_end += chunklen;

	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;

		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}

		pr_debug("%s: handling %i-byte packet\n", __func__,
				len_packet(packet));
		dbg_dump_packet(packet);

		switch (header->type) {
		case VS_DATA_PACKET_HEADER:
			if (!is_open(hp))
				break;
			flip = hvsi_recv_data(hp, packet);
			break;
		case VS_CONTROL_PACKET_HEADER:
			hvsi_recv_control(hp, packet, tty, handshake);
			break;
		case VS_QUERY_RESPONSE_PACKET_HEADER:
			hvsi_recv_response(hp, packet);
			break;
		case VS_QUERY_PACKET_HEADER:
			hvsi_recv_query(hp, packet);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
					hp->index, header->type);
			dump_packet(packet);
			break;
		}

		packet += len_packet(packet);

		if (*handshake) {
			pr_debug("%s: handshake\n", __func__);
			break;
		}
	}

	compact_inbuf(hp, packet);

	if (flip)
		tty_flip_buffer_push(&hp->port);

	return 1;
}

static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __func__,
			hp->n_throttle);

	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}

/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct hvsi_struct *handshake;
	struct tty_struct *tty;
	unsigned long flags;
	int again = 1;

	pr_debug("%s\n", __func__);

	tty = tty_port_tty_get(&hp->port);

	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, tty, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);

		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}

	spin_lock_irqsave(&hp->lock, flags);
	if (tty && hp->n_throttle && !tty_throttled(tty)) {
		/* we weren't hung up and we weren't throttled, so we can
		 * deliver the rest now */
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	tty_kref_put(tty);

	return IRQ_HANDLED;
}

/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

		if (hp->state == state)
			return 0;

		mdelay(5);
		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}

/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;

	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;

	return ret;
}

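/* send a query packet (e.g. a version or modem-control status request) to the service processor */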
static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(verb);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}

	return 0;
}

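/* query the service processor for the current modem control bits and wait for the response */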
static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;

	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}

	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

	return 0;
}

/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = sizeof(struct hvsi_control);
	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
	packet.mask = cpu_to_be32(HVSI_TSDTR);

	if (mctrl & TIOCM_DTR)
		packet.word = cpu_to_be32(HVSI_TSDTR);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}

	return 0;
}

static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	while (time_before(end_jiffies, jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}

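/* perform the HVSI version handshake and wait for the connection to reach HVSI_OPEN */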
static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;

	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);

	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;

	return 0;
}

static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);

	if (hvsi_handshake(hp) >= 0)
		return;

	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}

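/* wrap up to HVSI_MAX_OUTGOING_DATA bytes of output in a data packet and send it */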
static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;

	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

	packet.hdr.type = VS_DATA_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);

	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (ret == packet.hdr.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}

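/* tell the service processor we are done with the protocol */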
static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = 6;
	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
}

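/*
 * tty open(): enable interrupts, and for non-console ports perform the
 * handshake, fetch the modem control bits, and raise DTR.
 */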
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int ret;

	pr_debug("%s\n", __func__);

	hp = &hvsi_ports[tty->index];

	tty->driver_data = hp;

	mb();
	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	tty_port_tty_set(&hp->port, tty);
	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}

/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);

	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work_sync(&hp->writer);
	flush_work(&hp->handshaker);

	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}

static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (tty_hung_up_p(filp))
		return;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->port.count == 0) {
		tty_port_tty_set(&hp->port, NULL);
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */

		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */
			tty->closing = 1;

			spin_unlock_irqrestore(&hp->lock, flags);

			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);

			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);

			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);

			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);

			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->port.count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
		       hp - hvsi_ports, hp->port.count);

	spin_unlock_irqrestore(&hp->lock, flags);
}

static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	tty_port_tty_set(&hp->port, NULL);

	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count = 0;
	hp->n_outbuf = 0;
	spin_unlock_irqrestore(&hp->lock, flags);
}

/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;

	if (hp->n_outbuf <= 0)
		return;

	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __func__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}

/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
#ifdef DEBUG
	static long start_j = 0;

	if (start_j == 0)
		start_j = jiffies;
#endif /* DEBUG */

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}

	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
#ifdef DEBUG
		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
				jiffies - start_j);
		start_j = 0;
#endif /* DEBUG */
		wake_up_all(&hp->emptyq);
		tty_port_tty_wakeup(&hp->port);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);
}

static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return N_OUTBUF - hp->n_outbuf;
}

static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->n_outbuf;
}

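/*
 * tty write(): copy as much as fits into outbuf and push it to the
 * hypervisor; anything left over is pushed later by hvsi_write_worker.
 */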
static int hvsi_write(struct tty_struct *tty,
		const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __func__);
		goto out;
	}

	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(tty));

		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;

		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}

	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
			total);

	return total;
}

/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	pr_debug("%s\n", __func__);

	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}

static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}

static int hvsi_tiocmget(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	hvsi_get_mctrl(hp);
	return hp->mctrl;
}

static int hvsi_tiocmset(struct tty_struct *tty,
				unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;

	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;

	spin_lock_irqsave(&hp->lock, flags);

	new_mctrl = (hp->mctrl & ~clear) | set;

	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return 0;
}


static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};

static int __init hvsi_init(void)
{
	int i, ret;

	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;

	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);

	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret = 1;

		tty_port_link_device(&hp->port, hvsi_driver, i);

		ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */

	ret = tty_register_driver(hvsi_driver);
	if (ret) {
		pr_err("Couldn't register hvsi console driver\n");
		goto err_free_irq;
	}

	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

	return 0;
err_free_irq:
	hvsi_wait = poll_for_state;
	for (i = 0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];

		free_irq(hp->virq, hp);
	}
	tty_driver_kref_put(hvsi_driver);

	return ret;
}
device_initcall(hvsi_init);

/***** console (not tty) code: *****/

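/* console->write(): translate LF to CRLF in place and send directly via hvsi_put_chars */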
static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;

	mb();
	if (!is_open(hp))
		return;

	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0;
			i -= ret;
		}
	}
}

static struct tty_driver *hvsi_console_device(struct console *console,
	int *index)
{
	*index = console->index;
	return hvsi_driver;
}

static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp;
	int ret;

	if (console->index < 0 || console->index >= hvsi_count)
		return -EINVAL;
	hp = &hvsi_ports[console->index];

	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);

	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;

	hp->flags |= HVSI_CONSOLE;

	return 0;
}

static struct console hvsi_console = {
	.name = "hvsi",
	.write = hvsi_console_print,
	.device = hvsi_console_device,
	.setup = hvsi_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};

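/* find hvterm-protocol vty nodes in the device tree and set up an hvsi port for each */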
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
		struct hvsi_struct *hp;
		const __be32 *vtermno, *irq;

		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		tty_port_init(&hp->port);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = be32_to_cpup(vtermno);
		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
		if (hp->virq == 0) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, be32_to_cpup(irq));
			tty_port_destroy(&hp->port);
			continue;
		}

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_console);
	return 0;
}
console_initcall(hvsi_console_init);