// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define SERIAL_MINIMUM_PACKET_SIZE 4
#define SERIAL_MAX_FRAMESIZE 4096
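
/*
 * Wire format handled by this layer: an optional start-of-frame byte
 * (CFSERL_STX, 0x02) followed by a 16-bit little-endian length field
 * and the frame payload. The length value does not count the length
 * field itself, hence the "+ 2" when computing the expected frame
 * size in cfserl_receive().
 */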
struct cfserl {
        struct cflayer layer;
        /* Partially received frame, kept until the remainder arrives */
        struct cfpkt *incomplete_frm;
        /* Protects parallel processing of incoming packets */
        spinlock_t sync;
        /* Frames are delimited by a leading STX byte when set */
        bool usestx;
};

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                           int phyid);

void cfserl_release(struct cflayer *layer)
{
        kfree(layer);
}

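/*
 * Usage sketch (illustrative only, not taken from this file): a CAIF
 * link driver creates the serial layer and wires it between the layer
 * above (e.g. the multiplexer) and the physical interface below, so
 * that cfserl_receive() can deliver reassembled frames upwards and
 * cfserl_transmit() can push framed data downwards:
 *
 *	struct cflayer *ser = cfserl_create(instance, true);
 *
 *	if (ser) {
 *		ser->up = mux_layer;
 *		ser->dn = phy_layer;
 *	}
 *
 * mux_layer and phy_layer are placeholders for whatever layers the
 * caller actually uses.
 */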
struct cflayer *cfserl_create(int instance, bool use_stx)
{
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);

        if (!this)
                return NULL;
        caif_assert(offsetof(struct cfserl, layer) == 0);
        this->layer.receive = cfserl_receive;
        this->layer.transmit = cfserl_transmit;
        this->layer.ctrlcmd = cfserl_ctrlcmd;
        this->usestx = use_stx;
        spin_lock_init(&this->sync);
        snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
        return &this->layer;
}

static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
        struct cfserl *layr = container_obj(l);
        u16 pkt_len;
        struct cfpkt *pkt = NULL;
        struct cfpkt *tail_pkt = NULL;
        u8 tmp8;
        u16 tmp;
        u8 stx = CFSERL_STX;
        int ret;
        u16 expectlen = 0;

        caif_assert(newpkt != NULL);
        spin_lock(&layr->sync);

        /* Concatenate any stored partial frame with the new data */
        if (layr->incomplete_frm != NULL) {
                layr->incomplete_frm =
                    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
                pkt = layr->incomplete_frm;
                if (pkt == NULL) {
                        spin_unlock(&layr->sync);
                        return -ENOMEM;
                }
        } else {
                pkt = newpkt;
        }
        layr->incomplete_frm = NULL;

        do {
                /* Search for STX at start of pkt if STX is used */
                if (layr->usestx) {
                        cfpkt_extr_head(pkt, &tmp8, 1);
                        if (tmp8 != CFSERL_STX) {
                                while (cfpkt_more(pkt)
                                       && tmp8 != CFSERL_STX) {
                                        cfpkt_extr_head(pkt, &tmp8, 1);
                                }
                                if (!cfpkt_more(pkt)) {
                                        cfpkt_destroy(pkt);
                                        layr->incomplete_frm = NULL;
                                        spin_unlock(&layr->sync);
                                        return -EPROTO;
                                }
                        }
                }

                pkt_len = cfpkt_getlen(pkt);

                /*
                 * pkt_len is the accumulated length of the packet data
                 * we have received so far.
                 * Exit if frame doesn't hold length.
                 */

                if (pkt_len < 2) {
                        if (layr->usestx)
                                cfpkt_add_head(pkt, &stx, 1);
                        layr->incomplete_frm = pkt;
                        spin_unlock(&layr->sync);
                        return 0;
                }

                /*
                 * Find length of frame.
                 * expectlen is the length we need for a full frame.
                 */
                cfpkt_peek_head(pkt, &tmp, 2);
                expectlen = le16_to_cpu(tmp) + 2;
                /*
                 * Frame error handling
                 */
                if (expectlen < SERIAL_MINIMUM_PACKET_SIZE
                    || expectlen > SERIAL_MAX_FRAMESIZE) {
                        if (!layr->usestx) {
                                if (pkt != NULL)
                                        cfpkt_destroy(pkt);
                                layr->incomplete_frm = NULL;
                                expectlen = 0;
                                spin_unlock(&layr->sync);
                                return -EPROTO;
                        }
                        continue;
                }

                if (pkt_len < expectlen) {
                        /* Too little received data */
                        if (layr->usestx)
                                cfpkt_add_head(pkt, &stx, 1);
                        layr->incomplete_frm = pkt;
                        spin_unlock(&layr->sync);
                        return 0;
                }

                /*
                 * Enough data for at least one frame.
                 * Split the frame, if too long
                 */
                if (pkt_len > expectlen)
                        tail_pkt = cfpkt_split(pkt, expectlen);
                else
                        tail_pkt = NULL;

                /* Send the first part of the packet upwards. */
                spin_unlock(&layr->sync);
                ret = layr->layer.up->receive(layr->layer.up, pkt);
                spin_lock(&layr->sync);
                if (ret == -EILSEQ) {
                        if (layr->usestx) {
                                if (tail_pkt != NULL)
                                        pkt = cfpkt_append(pkt, tail_pkt, 0);
                                /* Start search for next STX if frame failed */
                                continue;
                        } else {
                                cfpkt_destroy(pkt);
                                pkt = NULL;
                        }
                }

                pkt = tail_pkt;

        } while (pkt != NULL);

        spin_unlock(&layr->sync);
        return 0;
}

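/* Prepend the STX delimiter (when enabled) and pass the frame down. */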
static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
        struct cfserl *layr = container_obj(layer);
        u8 tmp8 = CFSERL_STX;

        if (layr->usestx)
                cfpkt_add_head(newpkt, &tmp8, 1);
        return layer->dn->transmit(layer->dn, newpkt);
}

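/*
 * Control commands (e.g. flow control) are forwarded unchanged to the
 * layer above.
 */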
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                           int phyid)
{
        layr->up->ctrlcmd(layr->up, ctrl, phyid);
}