// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 */

/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */
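
/*
 * For reference, a rough sketch of the on-the-wire framing this file parses
 * and builds (the authoritative definitions live in <asm/hvsi.h>, so treat
 * this as an approximation): every packet begins with a header of the form
 *
 *	struct hvsi_header {
 *		uint8_t type;	// one of the VS_*_PACKET_HEADER values
 *		uint8_t len;	// total packet length, header included
 *		__be16  seqno;	// per-connection sequence number
 *	} __packed;
 *
 * Data packets append raw payload bytes after the header; control, query,
 * and query-response packets append a 16-bit verb and, for some verbs, a
 * 32-bit word (e.g. the modem-control mask/word used below).
 */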

#undef DEBUG

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
#include <asm/hvsi.h>

#define HVSI_MAJOR 229
#define HVSI_MINOR 128
#define MAX_NR_HVSI_CONSOLES 4

#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12

/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))

struct hvsi_struct {
	struct tty_port port;
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	spinlock_t lock;
	int index;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	uint8_t *inbuf_end;
	int n_throttle;
	int n_outbuf;
	uint32_t vtermno;
	uint32_t virq;
	atomic_t seqno; /* HVSI packet sequence number */
	uint16_t mctrl;
	uint8_t state;  /* HVSI protocol state */
	uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
	uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);

enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}

static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}

static inline void print_state(struct hvsi_struct *hp)
{
#ifdef DEBUG
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = (hp->state < ARRAY_SIZE(state_names))
		? state_names[hp->state] : "UNKNOWN";

	pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
}

static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}

static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}

static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}

static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}

static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */

	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */

	return 1;
}

/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);

	pr_debug("%s: %i chars remain\n", __func__, remaining);

	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);

	hp->inbuf_end = hp->inbuf + remaining;
}
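
/*
 * How the reassembly buffer is used (see hvsi_load_chunk below): reads of up
 * to HVSI_MAX_READ bytes are appended at inbuf_end, complete packets are then
 * consumed from the front of inbuf, and compact_inbuf() slides any trailing
 * partial packet back to the start of inbuf so the next read can finish it.
 */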

#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif

static void dump_hex(const uint8_t *data, int len)
{
	int i;

	printk(" ");
	for (i=0; i < len; i++)
		printk("%.2x", data[i]);

	printk("\n ");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}

static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;

	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
			header->seqno);

	dump_hex(packet, header->len);
}

static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long got;

	got = hvc_get_chars(hp->vtermno, buf, count);

	return got;
}

static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct *tty, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;

	switch (be16_to_cpu(header->verb)) {
	case VSV_MODEM_CTL_UPDATE:
		if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
			/* CD went away; no more connection */
			pr_debug("hvsi%i: CD dropped\n", hp->index);
			hp->mctrl &= TIOCM_CD;
			if (tty && !C_CLOCAL(tty))
				tty_hangup(tty);
		}
		break;
	case VSV_CLOSE_PROTOCOL:
		pr_debug("hvsi%i: service processor came back\n", hp->index);
		if (hp->state != HVSI_CLOSED) {
			*to_handshake = hp;
		}
		break;
	default:
		printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
		dump_packet(packet);
		break;
	}
}

static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
	uint32_t mctrl_word;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_RESPONSE:
		__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
		break;
	case HVSI_WAIT_FOR_MCTRL_RESPONSE:
		hp->mctrl = 0;
		mctrl_word = be32_to_cpu(resp->u.mctrl_word);
		if (mctrl_word & HVSI_TSDTR)
			hp->mctrl |= TIOCM_DTR;
		if (mctrl_word & HVSI_TSCD)
			hp->mctrl |= TIOCM_CD;
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
		dump_packet(packet);
		break;
	}
}

/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query_response);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = cpu_to_be16(query_seqno+1);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}

	return 0;
}

static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;

	switch (hp->state) {
	case HVSI_WAIT_FOR_VER_QUERY:
		hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
		__set_state(hp, HVSI_OPEN);
		break;
	default:
		printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
		dump_packet(packet);
		break;
	}
}

static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;

	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(&hp->port, c, 0);
	}
}

/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * revisited.
 */
#define TTY_THRESHOLD_THROTTLE 128
static bool hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;

	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);

	if (datalen == 0)
		return false;

	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}

	hvsi_insert_chars(hp, data, datalen);

	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __func__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}

	return true;
}

/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct *tty,
		struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;
	bool flip = false;

	*handshake = NULL;

	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __func__);
		return 0;
	}

	pr_debug("%s: got %i bytes\n", __func__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);

	hp->inbuf_end += chunklen;

	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;

		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}

		pr_debug("%s: handling %i-byte packet\n", __func__,
				len_packet(packet));
		dbg_dump_packet(packet);

		switch (header->type) {
		case VS_DATA_PACKET_HEADER:
			if (!is_open(hp))
				break;
			flip = hvsi_recv_data(hp, packet);
			break;
		case VS_CONTROL_PACKET_HEADER:
			hvsi_recv_control(hp, packet, tty, handshake);
			break;
		case VS_QUERY_RESPONSE_PACKET_HEADER:
			hvsi_recv_response(hp, packet);
			break;
		case VS_QUERY_PACKET_HEADER:
			hvsi_recv_query(hp, packet);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
				hp->index, header->type);
			dump_packet(packet);
			break;
		}

		packet += len_packet(packet);

		if (*handshake) {
			pr_debug("%s: handshake\n", __func__);
			break;
		}
	}

	compact_inbuf(hp, packet);

	if (flip)
		tty_flip_buffer_push(&hp->port);

	return 1;
}

static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __func__,
			hp->n_throttle);

	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}

/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct hvsi_struct *handshake;
	struct tty_struct *tty;
	unsigned long flags;
	int again = 1;

	pr_debug("%s\n", __func__);

	tty = tty_port_tty_get(&hp->port);

	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, tty, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);

		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}

	spin_lock_irqsave(&hp->lock, flags);
	if (tty && hp->n_throttle && !tty_throttled(tty)) {
		/* we weren't hung up and we weren't throttled, so we can
		 * deliver the rest now */
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	tty_kref_put(tty);

	return IRQ_HANDLED;
}

/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */

		if (hp->state == state)
			return 0;

		mdelay(5);
		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}

/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;

	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;

	return ret;
}
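
/*
 * Both helpers above are only ever called through the hvsi_wait function
 * pointer: hvsi_console_init() points it at poll_for_state() while the boot
 * console has no interrupts, and hvsi_init() switches it to wait_for_state()
 * once the per-port irqs have been requested (falling back to polling if the
 * tty driver fails to register).
 */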

static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_QUERY_PACKET_HEADER;
	packet.hdr.len = sizeof(struct hvsi_query);
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.verb = cpu_to_be16(verb);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}

	return 0;
}

static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;

	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}

	pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);

	return 0;
}

/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = sizeof(struct hvsi_control);
	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
	packet.mask = cpu_to_be32(HVSI_TSDTR);

	if (mctrl & TIOCM_DTR)
		packet.word = cpu_to_be32(HVSI_TSDTR);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (wrote != packet.hdr.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}

	return 0;
}

static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;

	while (time_before(end_jiffies, jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}

static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;

	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);

	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}

	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;

	return 0;
}

static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);

	if (hvsi_handshake(hp) >= 0)
		return;

	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}

static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;

	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);

	packet.hdr.type = VS_DATA_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);

	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
	if (ret == packet.hdr.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}

static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;

	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
	packet.hdr.len = 6;
	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);

	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);

	hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
}

static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int ret;

	pr_debug("%s\n", __func__);

	hp = &hvsi_ports[tty->index];

	tty->driver_data = hp;

	mb();
	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	tty_port_tty_set(&hp->port, tty);
	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}

/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);

	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work_sync(&hp->writer);
	flush_work(&hp->handshaker);

	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}

static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (tty_hung_up_p(filp))
		return;

	spin_lock_irqsave(&hp->lock, flags);

	if (--hp->port.count == 0) {
		tty_port_tty_set(&hp->port, NULL);
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */

		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */
			tty->closing = 1;

			spin_unlock_irqrestore(&hp->lock, flags);

			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);

			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);

			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);

			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);

			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->port.count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
		       hp - hvsi_ports, hp->port.count);

	spin_unlock_irqrestore(&hp->lock, flags);
}

static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	tty_port_tty_set(&hp->port, NULL);

	spin_lock_irqsave(&hp->lock, flags);
	hp->port.count = 0;
	hp->n_outbuf = 0;
	spin_unlock_irqrestore(&hp->lock, flags);
}

/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;

	if (hp->n_outbuf <= 0)
		return;

	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __func__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}

/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
#ifdef DEBUG
	static long start_j = 0;

	if (start_j == 0)
		start_j = jiffies;
#endif /* DEBUG */

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}

	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
#ifdef DEBUG
		pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
				jiffies - start_j);
		start_j = 0;
#endif /* DEBUG */
		wake_up_all(&hp->emptyq);
		tty_port_tty_wakeup(&hp->port);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);
}

static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return N_OUTBUF - hp->n_outbuf;
}

static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	return hp->n_outbuf;
}

static int hvsi_write(struct tty_struct *tty,
			const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;

	spin_lock_irqsave(&hp->lock, flags);

	pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);

	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __func__);
		goto out;
	}

	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(tty));

		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;

		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}

	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}

out:
	spin_unlock_irqrestore(&hp->lock, flags);

	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
			total);

	return total;
}
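
/*
 * Note that outbuf is only N_OUTBUF (12) bytes, matching
 * HVSI_MAX_OUTGOING_DATA, so each hvsi_push() maps to at most one data
 * packet; once hvsi_write_worker() drains outbuf it wakes the tty layer
 * (tty_port_tty_wakeup), which then calls hvsi_write() again for whatever
 * didn't fit the first time.
 */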

/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	pr_debug("%s\n", __func__);

	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}

static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		tty_flip_buffer_push(&hp->port);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}

static int hvsi_tiocmget(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;

	hvsi_get_mctrl(hp);
	return hp->mctrl;
}

static int hvsi_tiocmset(struct tty_struct *tty,
				unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;

	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;

	spin_lock_irqsave(&hp->lock, flags);

	new_mctrl = (hp->mctrl & ~clear) | set;

	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return 0;
}

static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};

static int __init hvsi_init(void)
{
	int i, ret;

	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;

	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);

	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret = 1;

		tty_port_link_device(&hp->port, hvsi_driver, i);

		ret = request_irq(hp->virq, hvsi_interrupt, 0, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */

	ret = tty_register_driver(hvsi_driver);
	if (ret) {
		pr_err("Couldn't register hvsi console driver\n");
		goto err_free_irq;
	}

	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);

	return 0;
err_free_irq:
	hvsi_wait = poll_for_state;
	for (i = 0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];

		free_irq(hp->virq, hp);
	}
	tty_driver_kref_put(hvsi_driver);

	return ret;
}
device_initcall(hvsi_init);

/***** console (not tty) code: *****/

static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;

	mb();
	if (!is_open(hp))
		return;

	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0;
			i -= ret;
		}
	}
}

static struct tty_driver *hvsi_console_device(struct console *console,
	int *index)
{
	*index = console->index;
	return hvsi_driver;
}

static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp;
	int ret;

	if (console->index < 0 || console->index >= hvsi_count)
		return -EINVAL;
	hp = &hvsi_ports[console->index];

	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);

	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;

	hp->flags |= HVSI_CONSOLE;

	return 0;
}

static struct console hvsi_console = {
	.name = "hvsi",
	.write = hvsi_console_print,
	.device = hvsi_console_device,
	.setup = hvsi_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};

static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
		struct hvsi_struct *hp;
		const __be32 *vtermno, *irq;

		vtermno = of_get_property(vty, "reg", NULL);
		irq = of_get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		tty_port_init(&hp->port);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = be32_to_cpup(vtermno);
		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
		if (hp->virq == 0) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__func__, be32_to_cpup(irq));
			tty_port_destroy(&hp->port);
			continue;
		}

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_console);
	return 0;
}
console_initcall(hvsi_console_init);