1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2015, Sony Mobile Communications Inc.
4*4882a593Smuzhiyun * Copyright (c) 2013, The Linux Foundation. All rights reserved.
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/netlink.h>
8*4882a593Smuzhiyun #include <linux/qrtr.h>
9*4882a593Smuzhiyun #include <linux/termios.h> /* For TIOCINQ/OUTQ */
10*4882a593Smuzhiyun #include <linux/spinlock.h>
11*4882a593Smuzhiyun #include <linux/wait.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <net/sock.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include "qrtr.h"
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #define QRTR_PROTO_VER_1 1
18*4882a593Smuzhiyun #define QRTR_PROTO_VER_2 3
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /* auto-bind range */
21*4882a593Smuzhiyun #define QRTR_MIN_EPH_SOCKET 0x4000
22*4882a593Smuzhiyun #define QRTR_MAX_EPH_SOCKET 0x7fff
23*4882a593Smuzhiyun #define QRTR_EPH_PORT_RANGE \
24*4882a593Smuzhiyun XA_LIMIT(QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET)
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /**
27*4882a593Smuzhiyun * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
28*4882a593Smuzhiyun * @version: protocol version
29*4882a593Smuzhiyun * @type: packet type; one of QRTR_TYPE_*
30*4882a593Smuzhiyun * @src_node_id: source node
31*4882a593Smuzhiyun * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
33*4882a593Smuzhiyun * @size: length of packet, excluding this header
34*4882a593Smuzhiyun * @dst_node_id: destination node
35*4882a593Smuzhiyun * @dst_port_id: destination port
36*4882a593Smuzhiyun */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;	/* on-the-wire layout; all fields little-endian */
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /**
49*4882a593Smuzhiyun * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
50*4882a593Smuzhiyun * @version: protocol version
51*4882a593Smuzhiyun * @type: packet type; one of QRTR_TYPE_*
52*4882a593Smuzhiyun * @flags: bitmask of QRTR_FLAGS_*
53*4882a593Smuzhiyun * @optlen: length of optional header data
54*4882a593Smuzhiyun * @size: length of packet, excluding this header and optlen
55*4882a593Smuzhiyun * @src_node_id: source node
56*4882a593Smuzhiyun * @src_port_id: source port
57*4882a593Smuzhiyun * @dst_node_id: destination node
58*4882a593Smuzhiyun * @dst_port_id: destination port
59*4882a593Smuzhiyun */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;	/* bitmask of QRTR_FLAGS_* */
	u8 optlen;	/* optional header bytes that follow this struct */
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
};	/* naturally packed: every member sits on its own alignment */
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun #define QRTR_FLAGS_CONFIRM_RX BIT(0)
73*4882a593Smuzhiyun
/* Per-packet routing metadata, parsed out of the v1/v2 transport header
 * by qrtr_endpoint_post() and stored in skb->cb.
 */
struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;

	u8 type;	/* QRTR_TYPE_* */
	u8 confirm_rx;	/* sender asked for a QRTR_TYPE_RESUME_TX reply */
};
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun #define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
85*4882a593Smuzhiyun sizeof(struct qrtr_hdr_v2))
86*4882a593Smuzhiyun
/**
 * struct qrtr_sock - QIPCRTR socket state
 * @sk: base socket; MUST stay the first member (qrtr_sk() relies on it)
 * @us: this socket's own qrtr address (node/port)
 * @peer: remote address — presumably set on connect(); confirm against caller
 */
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};
93*4882a593Smuzhiyun
/* Upcast a struct sock to its containing qrtr_sock. */
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	/* Only valid because sk is the first member of struct qrtr_sock */
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun static unsigned int qrtr_local_nid = 1;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun /* for node ids */
103*4882a593Smuzhiyun static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
104*4882a593Smuzhiyun static DEFINE_SPINLOCK(qrtr_nodes_lock);
105*4882a593Smuzhiyun /* broadcast list */
106*4882a593Smuzhiyun static LIST_HEAD(qrtr_all_nodes);
107*4882a593Smuzhiyun /* lock for qrtr_all_nodes and node reference */
108*4882a593Smuzhiyun static DEFINE_MUTEX(qrtr_node_lock);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /* local port allocation management */
111*4882a593Smuzhiyun static DEFINE_XARRAY_ALLOC(qrtr_ports);
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun /**
114*4882a593Smuzhiyun * struct qrtr_node - endpoint node
115*4882a593Smuzhiyun * @ep_lock: lock for endpoint management and callbacks
116*4882a593Smuzhiyun * @ep: endpoint
117*4882a593Smuzhiyun * @ref: reference count for node
118*4882a593Smuzhiyun * @nid: node id
119*4882a593Smuzhiyun * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
120*4882a593Smuzhiyun * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
121*4882a593Smuzhiyun * @rx_queue: receive queue
122*4882a593Smuzhiyun * @item: list item for broadcast list
123*4882a593Smuzhiyun */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;	/* NULL once the endpoint unregisters */
	struct kref ref;
	unsigned int nid;	/* QRTR_EP_NID_AUTO until assigned */

	struct radix_tree_root qrtr_tx_flow;
	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow inserts */

	struct sk_buff_head rx_queue;
	struct list_head item;	/* on qrtr_all_nodes, under qrtr_node_lock */
};
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun /**
138*4882a593Smuzhiyun * struct qrtr_tx_flow - tx flow control
139*4882a593Smuzhiyun * @resume_tx: waiters for a resume tx from the remote
140*4882a593Smuzhiyun * @pending: number of waiting senders
141*4882a593Smuzhiyun * @tx_failed: indicates that a message with confirm_rx flag was lost
142*4882a593Smuzhiyun */
struct qrtr_tx_flow {
	/* resume_tx.lock also serializes updates to pending and tx_failed */
	struct wait_queue_head resume_tx;
	int pending;	/* messages sent since the last resume-tx from remote */
	int tx_failed;	/* a confirm_rx-flagged message failed to transmit */
};
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun #define QRTR_TX_FLOW_HIGH 10
150*4882a593Smuzhiyun #define QRTR_TX_FLOW_LOW 5
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
153*4882a593Smuzhiyun int type, struct sockaddr_qrtr *from,
154*4882a593Smuzhiyun struct sockaddr_qrtr *to);
155*4882a593Smuzhiyun static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
156*4882a593Smuzhiyun int type, struct sockaddr_qrtr *from,
157*4882a593Smuzhiyun struct sockaddr_qrtr *to);
158*4882a593Smuzhiyun static struct qrtr_sock *qrtr_port_lookup(int port);
159*4882a593Smuzhiyun static void qrtr_port_put(struct qrtr_sock *ipc);
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun /* Release node resources and free the node.
162*4882a593Smuzhiyun *
163*4882a593Smuzhiyun * Do not call directly, use qrtr_node_release. To be used with
164*4882a593Smuzhiyun * kref_put_mutex. As such, the node mutex is expected to be locked on call.
165*4882a593Smuzhiyun */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
	struct radix_tree_iter iter;
	struct qrtr_tx_flow *flow;
	unsigned long flags;
	void __rcu **slot;

	/* Drop the nid -> node mapping so qrtr_node_lookup() can no longer
	 * find (and try to re-acquire) this node.
	 */
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	/* qrtr_node_lock was taken by kref_put_mutex(); release it once the
	 * node is off the broadcast list.
	 */
	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);

	/* Free tx flow counters */
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
		kfree(flow);
	}
	kfree(node);
}
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun /* Increment reference to node. */
qrtr_node_acquire(struct qrtr_node * node)194*4882a593Smuzhiyun static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun if (node)
197*4882a593Smuzhiyun kref_get(&node->ref);
198*4882a593Smuzhiyun return node;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun /* Decrement reference to node and release as necessary. */
qrtr_node_release(struct qrtr_node * node)202*4882a593Smuzhiyun static void qrtr_node_release(struct qrtr_node *node)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun if (!node)
205*4882a593Smuzhiyun return;
206*4882a593Smuzhiyun kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /**
210*4882a593Smuzhiyun * qrtr_tx_resume() - reset flow control counter
211*4882a593Smuzhiyun * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
212*4882a593Smuzhiyun * @skb: resume_tx packet
213*4882a593Smuzhiyun */
static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
{
	/* Caller (qrtr_endpoint_post) must guarantee the payload is at least
	 * sizeof(struct qrtr_ctrl_pkt) — NOTE(review): verify that check
	 * exists, otherwise these reads can run past the end of skb->data.
	 */
	struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)skb->data;
	u64 remote_node = le32_to_cpu(pkt->client.node);
	u32 remote_port = le32_to_cpu(pkt->client.port);
	struct qrtr_tx_flow *flow;
	unsigned long key;

	/* Flows are keyed by the remote's node/port pair */
	key = remote_node << 32 | remote_port;

	rcu_read_lock();
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	rcu_read_unlock();
	if (flow) {
		spin_lock(&flow->resume_tx.lock);
		flow->pending = 0;
		spin_unlock(&flow->resume_tx.lock);
		/* Unblock senders stalled in qrtr_tx_wait() */
		wake_up_interruptible_all(&flow->resume_tx);
	}

	consume_skb(skb);
}
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /**
238*4882a593Smuzhiyun * qrtr_tx_wait() - flow control for outgoing packets
239*4882a593Smuzhiyun * @node: qrtr_node that the packet is to be send to
240*4882a593Smuzhiyun * @dest_node: node id of the destination
241*4882a593Smuzhiyun * @dest_port: port number of the destination
242*4882a593Smuzhiyun * @type: type of message
243*4882a593Smuzhiyun *
244*4882a593Smuzhiyun * The flow control scheme is based around the low and high "watermarks". When
245*4882a593Smuzhiyun * the low watermark is passed the confirm_rx flag is set on the outgoing
246*4882a593Smuzhiyun * message, which will trigger the remote to send a control message of the type
247*4882a593Smuzhiyun * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
249*4882a593Smuzhiyun *
250*4882a593Smuzhiyun * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure
251*4882a593Smuzhiyun */
static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
			int type)
{
	unsigned long key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	/* Look up the flow for this destination, creating it on first use.
	 * qrtr_tx_lock only serializes inserts; readers use RCU elsewhere.
	 */
	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
				kfree(flow);
				flow = NULL;
			}
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;

	/* Sleep (holding resume_tx.lock between checks) until there is room
	 * in the window, a previous confirm_rx send failed, or the endpoint
	 * goes away.
	 */
	spin_lock_irq(&flow->resume_tx.lock);
	ret = wait_event_interruptible_locked_irq(flow->resume_tx,
						  flow->pending < QRTR_TX_FLOW_HIGH ||
						  flow->tx_failed ||
						  !node->ep);
	if (ret < 0) {
		confirm_rx = ret;	/* interrupted by a signal */
	} else if (!node->ep) {
		confirm_rx = -EPIPE;	/* endpoint was unregistered */
	} else if (flow->tx_failed) {
		/* Re-request an ack to replace the one lost with the
		 * dropped message.
		 */
		flow->tx_failed = 0;
		confirm_rx = 1;
	} else {
		flow->pending++;
		/* Ask for resume-tx once per QRTR_TX_FLOW_LOW messages */
		confirm_rx = flow->pending == QRTR_TX_FLOW_LOW;
	}
	spin_unlock_irq(&flow->resume_tx.lock);

	return confirm_rx;
}
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun /**
304*4882a593Smuzhiyun * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
305*4882a593Smuzhiyun * @node: qrtr_node that the packet is to be send to
306*4882a593Smuzhiyun * @dest_node: node id of the destination
307*4882a593Smuzhiyun * @dest_port: port number of the destination
308*4882a593Smuzhiyun *
309*4882a593Smuzhiyun * Signal that the transmission of a message with confirm_rx flag failed. The
310*4882a593Smuzhiyun * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
311*4882a593Smuzhiyun * at which point transmission would stall forever waiting for the resume TX
312*4882a593Smuzhiyun * message associated with the dropped confirm_rx message.
313*4882a593Smuzhiyun * Work around this by marking the flow as having a failed transmission and
314*4882a593Smuzhiyun * cause the next transmission attempt to be sent with the confirm_rx.
315*4882a593Smuzhiyun */
qrtr_tx_flow_failed(struct qrtr_node * node,int dest_node,int dest_port)316*4882a593Smuzhiyun static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
317*4882a593Smuzhiyun int dest_port)
318*4882a593Smuzhiyun {
319*4882a593Smuzhiyun unsigned long key = (u64)dest_node << 32 | dest_port;
320*4882a593Smuzhiyun struct qrtr_tx_flow *flow;
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun rcu_read_lock();
323*4882a593Smuzhiyun flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
324*4882a593Smuzhiyun rcu_read_unlock();
325*4882a593Smuzhiyun if (flow) {
326*4882a593Smuzhiyun spin_lock_irq(&flow->resume_tx.lock);
327*4882a593Smuzhiyun flow->tx_failed = 1;
328*4882a593Smuzhiyun spin_unlock_irq(&flow->resume_tx.lock);
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun /* Pass an outgoing packet socket buffer to the endpoint driver. */
/* Pass an outgoing packet socket buffer to the endpoint driver.
 *
 * Consumes @skb on every path (success, flow-control error, or xmit
 * failure). Returns 0 on success or a negative errno.
 */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			     int type, struct sockaddr_qrtr *from,
			     struct sockaddr_qrtr *to)
{
	struct qrtr_hdr_v1 *hdr;
	size_t len = skb->len;
	int rc, confirm_rx;

	/* May sleep until the remote's flow-control window opens;
	 * returns 1 if the confirm_rx flag must be set, <0 on error.
	 */
	confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
	if (confirm_rx < 0) {
		kfree_skb(skb);
		return confirm_rx;
	}

	/* Transmit is always done with a v1 header, regardless of what
	 * version the packet arrived with.
	 */
	hdr = skb_push(skb, sizeof(*hdr));
	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(from->sq_node);
	hdr->src_port_id = cpu_to_le32(from->sq_port);
	if (to->sq_port == QRTR_PORT_CTRL) {
		/* Control messages are addressed to the peer node itself */
		hdr->dst_node_id = cpu_to_le32(node->nid);
		hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	} else {
		hdr->dst_node_id = cpu_to_le32(to->sq_node);
		hdr->dst_port_id = cpu_to_le32(to->sq_port);
	}

	hdr->size = cpu_to_le32(len);
	hdr->confirm_rx = !!confirm_rx;

	/* Pad the payload to a 4-byte boundary, as the wire format requires */
	rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));

	if (!rc) {
		mutex_lock(&node->ep_lock);
		rc = -ENODEV;
		if (node->ep)
			rc = node->ep->xmit(node->ep, skb);
		else
			kfree_skb(skb);
		mutex_unlock(&node->ep_lock);
	}
	/* Need to ensure that a subsequent message carries the otherwise lost
	 * confirm_rx flag if we dropped this one */
	if (rc && confirm_rx)
		qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);

	return rc;
}
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun /* Lookup node by id.
383*4882a593Smuzhiyun *
384*4882a593Smuzhiyun * callers must release with qrtr_node_release()
385*4882a593Smuzhiyun */
qrtr_node_lookup(unsigned int nid)386*4882a593Smuzhiyun static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun struct qrtr_node *node;
389*4882a593Smuzhiyun unsigned long flags;
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun spin_lock_irqsave(&qrtr_nodes_lock, flags);
392*4882a593Smuzhiyun node = radix_tree_lookup(&qrtr_nodes, nid);
393*4882a593Smuzhiyun node = qrtr_node_acquire(node);
394*4882a593Smuzhiyun spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun return node;
397*4882a593Smuzhiyun }
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun /* Assign node id to node.
400*4882a593Smuzhiyun *
401*4882a593Smuzhiyun * This is mostly useful for automatic node id assignment, based on
402*4882a593Smuzhiyun * the source id in the incoming packet.
403*4882a593Smuzhiyun */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	unsigned long flags;

	/* Only ever assign once, and never assign the AUTO sentinel */
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	/* NOTE(review): radix_tree_insert() can fail (e.g. -ENOMEM) and the
	 * return value is ignored here; node->nid is updated regardless —
	 * confirm this is acceptable upstream behavior.
	 */
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun /**
418*4882a593Smuzhiyun * qrtr_endpoint_post() - post incoming data
419*4882a593Smuzhiyun * @ep: endpoint handle
420*4882a593Smuzhiyun * @data: data pointer
421*4882a593Smuzhiyun * @len: size of data in bytes
422*4882a593Smuzhiyun *
423*4882a593Smuzhiyun * Return: 0 on success; negative error code on failure
424*4882a593Smuzhiyun */
/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Parses the v1 or v2 transport header, fills in the skb control buffer,
 * and routes the packet to the flow-control handler or the destination
 * socket's receive queue.
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr_v1 *v1;
	const struct qrtr_hdr_v2 *v2;
	struct qrtr_sock *ipc;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	size_t size;
	unsigned int ver;
	size_t hdrlen;

	/* The wire format requires 4-byte alignment */
	if (len == 0 || len & 3)
		return -EINVAL;

	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
	if (!skb)
		return -ENOMEM;

	cb = (struct qrtr_cb *)skb->cb;

	/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;

	switch (ver) {
	case QRTR_PROTO_VER_1:
		if (len < sizeof(*v1))
			goto err;
		v1 = data;
		hdrlen = sizeof(*v1);

		cb->type = le32_to_cpu(v1->type);
		cb->src_node = le32_to_cpu(v1->src_node_id);
		cb->src_port = le32_to_cpu(v1->src_port_id);
		cb->confirm_rx = !!v1->confirm_rx;
		cb->dst_node = le32_to_cpu(v1->dst_node_id);
		cb->dst_port = le32_to_cpu(v1->dst_port_id);

		size = le32_to_cpu(v1->size);
		break;
	case QRTR_PROTO_VER_2:
		if (len < sizeof(*v2))
			goto err;
		v2 = data;
		hdrlen = sizeof(*v2) + v2->optlen;

		cb->type = v2->type;
		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		cb->src_node = le16_to_cpu(v2->src_node_id);
		cb->src_port = le16_to_cpu(v2->src_port_id);
		cb->dst_node = le16_to_cpu(v2->dst_node_id);
		cb->dst_port = le16_to_cpu(v2->dst_port_id);

		/* v2 carries ports as 16 bit; widen the CTRL sentinel back
		 * to its canonical 32-bit value.
		 */
		if (cb->src_port == (u16)QRTR_PORT_CTRL)
			cb->src_port = QRTR_PORT_CTRL;
		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
			cb->dst_port = QRTR_PORT_CTRL;

		size = le32_to_cpu(v2->size);
		break;
	default:
		pr_err("qrtr: Invalid version %d\n", ver);
		goto err;
	}

	/* len must cover header + padded payload exactly */
	if (!size || len != ALIGN(size, 4) + hdrlen)
		goto err;

	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
	    cb->type != QRTR_TYPE_RESUME_TX)
		goto err;

	/* RESUME_TX payloads are parsed as struct qrtr_ctrl_pkt by
	 * qrtr_tx_resume(); reject undersized ones here so it cannot read
	 * past the end of the skb data (fixes an out-of-bounds read).
	 */
	if (cb->type == QRTR_TYPE_RESUME_TX &&
	    size < sizeof(struct qrtr_ctrl_pkt))
		goto err;

	skb_put_data(skb, data + hdrlen, size);

	/* Learn the remote's node id from the first packet it sends */
	qrtr_node_assign(node, cb->src_node);

	if (cb->type == QRTR_TYPE_RESUME_TX) {
		/* qrtr_tx_resume() consumes the skb */
		qrtr_tx_resume(node, skb);
	} else {
		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc)
			goto err;

		if (sock_queue_rcv_skb(&ipc->sk, skb)) {
			qrtr_port_put(ipc);
			goto err;
		}

		qrtr_port_put(ipc);
	}

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
523*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun /**
526*4882a593Smuzhiyun * qrtr_alloc_ctrl_packet() - allocate control packet skb
527*4882a593Smuzhiyun * @pkt: reference to qrtr_ctrl_pkt pointer
528*4882a593Smuzhiyun *
529*4882a593Smuzhiyun * Returns newly allocated sk_buff, or NULL on failure
530*4882a593Smuzhiyun *
531*4882a593Smuzhiyun * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
532*4882a593Smuzhiyun * on success returns a reference to the control packet in @pkt.
533*4882a593Smuzhiyun */
qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt ** pkt)534*4882a593Smuzhiyun static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
535*4882a593Smuzhiyun {
536*4882a593Smuzhiyun const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
537*4882a593Smuzhiyun struct sk_buff *skb;
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
540*4882a593Smuzhiyun if (!skb)
541*4882a593Smuzhiyun return NULL;
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun skb_reserve(skb, QRTR_HDR_MAX_SIZE);
544*4882a593Smuzhiyun *pkt = skb_put_zero(skb, pkt_len);
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun return skb;
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun /**
550*4882a593Smuzhiyun * qrtr_endpoint_register() - register a new endpoint
551*4882a593Smuzhiyun * @ep: endpoint to register
552*4882a593Smuzhiyun * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
553*4882a593Smuzhiyun * Return: 0 on success; negative error code on failure
554*4882a593Smuzhiyun *
555*4882a593Smuzhiyun * The specified endpoint must have the xmit function pointer set on call.
556*4882a593Smuzhiyun */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Initial reference is dropped in qrtr_endpoint_unregister() */
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
	mutex_init(&node->qrtr_tx_lock);

	/* No-op when nid is QRTR_EP_NID_AUTO; the id is then learned from
	 * the first incoming packet in qrtr_endpoint_post().
	 */
	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
586*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
587*4882a593Smuzhiyun
588*4882a593Smuzhiyun /**
589*4882a593Smuzhiyun * qrtr_endpoint_unregister - unregister endpoint
590*4882a593Smuzhiyun * @ep: endpoint to unregister
591*4882a593Smuzhiyun */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct radix_tree_iter iter;
	struct qrtr_ctrl_pkt *pkt;
	struct qrtr_tx_flow *flow;
	struct sk_buff *skb;
	void __rcu **slot;

	/* Clear ep first: qrtr_node_enqueue() and qrtr_tx_wait() treat a
	 * NULL ep as "endpoint gone" and bail out.
	 */
	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
	}

	/* Wake up any transmitters waiting for resume-tx from the node */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Drop the reference taken at registration; may free the node */
	qrtr_node_release(node);
	ep->node = NULL;
}
625*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun /* Lookup socket by port.
628*4882a593Smuzhiyun *
629*4882a593Smuzhiyun * Callers must release with qrtr_port_put()
630*4882a593Smuzhiyun */
qrtr_port_lookup(int port)631*4882a593Smuzhiyun static struct qrtr_sock *qrtr_port_lookup(int port)
632*4882a593Smuzhiyun {
633*4882a593Smuzhiyun struct qrtr_sock *ipc;
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun if (port == QRTR_PORT_CTRL)
636*4882a593Smuzhiyun port = 0;
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun rcu_read_lock();
639*4882a593Smuzhiyun ipc = xa_load(&qrtr_ports, port);
640*4882a593Smuzhiyun if (ipc)
641*4882a593Smuzhiyun sock_hold(&ipc->sk);
642*4882a593Smuzhiyun rcu_read_unlock();
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun return ipc;
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun
647*4882a593Smuzhiyun /* Release acquired socket. */
/* Release a socket reference acquired via qrtr_port_lookup(). */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun /* Remove port assignment. */
/* Remove port assignment.
 *
 * Broadcasts DEL_CLIENT for the port, drops the xarray's socket reference
 * and erases the entry, then waits out any concurrent lookup.
 */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	/* Tell everyone this client is going away; best-effort, so an
	 * allocation failure is silently tolerated.
	 */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	/* The control port lives under index 0 in qrtr_ports */
	if (port == QRTR_PORT_CTRL)
		port = 0;

	/* Drop the reference the xarray entry represented */
	__sock_put(&ipc->sk);

	xa_erase(&qrtr_ports, port);

	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to up increment the refcount */
	synchronize_rcu();
}
687*4882a593Smuzhiyun
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >=QRTR_MIN_EPH_SOCKET: Specified; available to all
 *
 * Returns 0 on success, -EADDRINUSE if the port is taken, -EACCES for a
 * privileged port without CAP_NET_ADMIN, or another negative errno from
 * the xarray. On success a reference is held on the socket on behalf of
 * the port table; qrtr_port_remove() drops it.
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	if (!*port) {
		/* Pick a free ephemeral port */
		rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
			      GFP_KERNEL);
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		/* The control port lives at index 0 in the xarray */
		rc = xa_insert(&qrtr_ports, 0, ipc, GFP_KERNEL);
	} else {
		rc = xa_insert(&qrtr_ports, *port, ipc, GFP_KERNEL);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
722*4882a593Smuzhiyun
/* Reset all non-control ports.
 *
 * Called when a new controller binds the control port: every other open
 * socket is flagged with ENETRESET so clients can re-register with the
 * new name server.
 */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	unsigned long index;

	rcu_read_lock();
	/* Start at index 1: index 0 is the control port itself */
	xa_for_each_start(&qrtr_ports, index, ipc, 1) {
		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		ipc->sk.sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	rcu_read_unlock();
}
738*4882a593Smuzhiyun
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 *
 * @zapped is non-zero when the socket currently holds no port
 * (SOCK_ZAPPED); otherwise the old port is released only after the new
 * one has been assigned successfully, so a failed rebind leaves the
 * original binding intact.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun /* Auto bind to an ephemeral port. */
qrtr_autobind(struct socket * sock)775*4882a593Smuzhiyun static int qrtr_autobind(struct socket *sock)
776*4882a593Smuzhiyun {
777*4882a593Smuzhiyun struct sock *sk = sock->sk;
778*4882a593Smuzhiyun struct sockaddr_qrtr addr;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun if (!sock_flag(sk, SOCK_ZAPPED))
781*4882a593Smuzhiyun return 0;
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun addr.sq_family = AF_QIPCRTR;
784*4882a593Smuzhiyun addr.sq_node = qrtr_local_nid;
785*4882a593Smuzhiyun addr.sq_port = 0;
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun return __qrtr_bind(sock, &addr, 1);
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun /* Bind socket to specified sockaddr. */
qrtr_bind(struct socket * sock,struct sockaddr * saddr,int len)791*4882a593Smuzhiyun static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
792*4882a593Smuzhiyun {
793*4882a593Smuzhiyun DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
794*4882a593Smuzhiyun struct qrtr_sock *ipc = qrtr_sk(sock->sk);
795*4882a593Smuzhiyun struct sock *sk = sock->sk;
796*4882a593Smuzhiyun int rc;
797*4882a593Smuzhiyun
798*4882a593Smuzhiyun if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
799*4882a593Smuzhiyun return -EINVAL;
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun if (addr->sq_node != ipc->us.sq_node)
802*4882a593Smuzhiyun return -EINVAL;
803*4882a593Smuzhiyun
804*4882a593Smuzhiyun lock_sock(sk);
805*4882a593Smuzhiyun rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
806*4882a593Smuzhiyun release_sock(sk);
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun return rc;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun
/* Queue packet to local peer socket.
 *
 * Looks up the destination port on this node and appends the skb to that
 * socket's receive queue. The skb is consumed on every path.
 *
 * Returns 0 on success, -ENODEV if no local socket is bound to the
 * destination port (or the destination is the sending socket itself),
 * or -ENOSPC if the receive queue rejected the packet.
 */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		if (ipc)
			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Record the sender so recvmsg() can report the source address */
	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}
841*4882a593Smuzhiyun
/* Queue packet for broadcast.
 *
 * Clones the skb once per known remote node under qrtr_node_lock and
 * hands each clone to qrtr_node_enqueue(); the @node parameter is only
 * reused as the iteration cursor. The original skb is finally offered to
 * a matching local socket via qrtr_local_enqueue(), which consumes it.
 * Always returns 0.
 */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(NULL, skb, type, from, to);

	return 0;
}
863*4882a593Smuzhiyun
/* Transmit a message to the destination in @msg (or the connected peer).
 *
 * Validates flags and length, auto-binds unbound sockets, selects the
 * enqueue function based on the destination node (broadcast, local or
 * remote), and hands off a newly allocated skb. Messages originating
 * from the control port carry their type in the first payload word.
 *
 * Returns the number of bytes accepted, or a negative errno.
 *
 * Fix: the skb allocation failure path previously overwrote rc with
 * -ENOMEM, masking the real error sock_alloc_send_skb() stored in rc
 * (e.g. -EAGAIN for MSG_DONTWAIT). The stored error is now returned.
 */
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	__le32 qrtr_type = cpu_to_le32(QRTR_TYPE_DATA);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	/* Payload length must fit the 16-bit wire size field */
	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	/* Pick the enqueue path for the destination node */
	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;
	}

	/* Pad the payload to a 4-byte boundary */
	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		/* rc already holds the allocation error (e.g. -EAGAIN
		 * for non-blocking sends); don't clobber it.
		 */
		goto out_node;
	}

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &qrtr_type, 4);
	}

	type = le32_to_cpu(qrtr_type);
	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
966*4882a593Smuzhiyun
qrtr_send_resume_tx(struct qrtr_cb * cb)967*4882a593Smuzhiyun static int qrtr_send_resume_tx(struct qrtr_cb *cb)
968*4882a593Smuzhiyun {
969*4882a593Smuzhiyun struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
970*4882a593Smuzhiyun struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
971*4882a593Smuzhiyun struct qrtr_ctrl_pkt *pkt;
972*4882a593Smuzhiyun struct qrtr_node *node;
973*4882a593Smuzhiyun struct sk_buff *skb;
974*4882a593Smuzhiyun int ret;
975*4882a593Smuzhiyun
976*4882a593Smuzhiyun node = qrtr_node_lookup(remote.sq_node);
977*4882a593Smuzhiyun if (!node)
978*4882a593Smuzhiyun return -EINVAL;
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun skb = qrtr_alloc_ctrl_packet(&pkt);
981*4882a593Smuzhiyun if (!skb)
982*4882a593Smuzhiyun return -ENOMEM;
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
985*4882a593Smuzhiyun pkt->client.node = cpu_to_le32(cb->dst_node);
986*4882a593Smuzhiyun pkt->client.port = cpu_to_le32(cb->dst_port);
987*4882a593Smuzhiyun
988*4882a593Smuzhiyun ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun qrtr_node_release(node);
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun return ret;
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun
/* Receive one datagram from the socket's queue.
 *
 * Copies up to @size bytes of the oldest queued skb into @msg, fills in
 * the source address when requested, and acknowledges flow-controlled
 * packets with a RESUME_TX notification.
 *
 * Returns the number of bytes copied (MSG_TRUNC is set in msg_flags if
 * the datagram was larger than @size), or a negative errno.
 */
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	lock_sock(sk);

	/* An unbound socket can never have queued data */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}
	cb = (struct qrtr_cb *)skb->cb;

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
	/* Ack flow-controlled packets even if the copy to user failed */
	if (cb->confirm_rx)
		qrtr_send_resume_tx(cb);

	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
1051*4882a593Smuzhiyun
qrtr_connect(struct socket * sock,struct sockaddr * saddr,int len,int flags)1052*4882a593Smuzhiyun static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
1053*4882a593Smuzhiyun int len, int flags)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
1056*4882a593Smuzhiyun struct qrtr_sock *ipc = qrtr_sk(sock->sk);
1057*4882a593Smuzhiyun struct sock *sk = sock->sk;
1058*4882a593Smuzhiyun int rc;
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
1061*4882a593Smuzhiyun return -EINVAL;
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun lock_sock(sk);
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun sk->sk_state = TCP_CLOSE;
1066*4882a593Smuzhiyun sock->state = SS_UNCONNECTED;
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun rc = qrtr_autobind(sock);
1069*4882a593Smuzhiyun if (rc) {
1070*4882a593Smuzhiyun release_sock(sk);
1071*4882a593Smuzhiyun return rc;
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun ipc->peer = *addr;
1075*4882a593Smuzhiyun sock->state = SS_CONNECTED;
1076*4882a593Smuzhiyun sk->sk_state = TCP_ESTABLISHED;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun release_sock(sk);
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun return 0;
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun
qrtr_getname(struct socket * sock,struct sockaddr * saddr,int peer)1083*4882a593Smuzhiyun static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
1084*4882a593Smuzhiyun int peer)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun struct qrtr_sock *ipc = qrtr_sk(sock->sk);
1087*4882a593Smuzhiyun struct sockaddr_qrtr qaddr;
1088*4882a593Smuzhiyun struct sock *sk = sock->sk;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun lock_sock(sk);
1091*4882a593Smuzhiyun if (peer) {
1092*4882a593Smuzhiyun if (sk->sk_state != TCP_ESTABLISHED) {
1093*4882a593Smuzhiyun release_sock(sk);
1094*4882a593Smuzhiyun return -ENOTCONN;
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun qaddr = ipc->peer;
1098*4882a593Smuzhiyun } else {
1099*4882a593Smuzhiyun qaddr = ipc->us;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun release_sock(sk);
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun qaddr.sq_family = AF_QIPCRTR;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun memcpy(saddr, &qaddr, sizeof(qaddr));
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun return sizeof(qaddr);
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun
/* Handle socket ioctls: queue sizes (TIOCOUTQ/TIOCINQ) and our bound
 * address (SIOCGIFADDR). Routing/interface ioctls are explicitly
 * rejected with -EINVAL; anything else falls through to -ENOIOCTLCMD.
 */
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		/* Remaining send-buffer space, clamped at zero */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* Size of the next pending datagram, 0 if none queued */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		/* Return our bound address in the ifreq address slot */
		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
1169*4882a593Smuzhiyun
/* Release (close) the socket.
 *
 * Marks the socket dead and shut down, removes any port binding (which
 * broadcasts DEL_CLIENT), purges undelivered packets and drops the
 * final reference.
 */
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock_orphan(sk);
	sock->sk = NULL;

	/* Only bound sockets hold a port table entry */
	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}
1199*4882a593Smuzhiyun
/* Datagram socket operations for AF_QIPCRTR sockets */
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.gettstamp	= sock_gettstamp,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
1219*4882a593Smuzhiyun
/* Protocol definition; obj_size makes sk_alloc() carve out a qrtr_sock */
static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
1225*4882a593Smuzhiyun
qrtr_create(struct net * net,struct socket * sock,int protocol,int kern)1226*4882a593Smuzhiyun static int qrtr_create(struct net *net, struct socket *sock,
1227*4882a593Smuzhiyun int protocol, int kern)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun struct qrtr_sock *ipc;
1230*4882a593Smuzhiyun struct sock *sk;
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun if (sock->type != SOCK_DGRAM)
1233*4882a593Smuzhiyun return -EPROTOTYPE;
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
1236*4882a593Smuzhiyun if (!sk)
1237*4882a593Smuzhiyun return -ENOMEM;
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun sock_set_flag(sk, SOCK_ZAPPED);
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun sock_init_data(sock, sk);
1242*4882a593Smuzhiyun sock->ops = &qrtr_proto_ops;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun ipc = qrtr_sk(sk);
1245*4882a593Smuzhiyun ipc->us.sq_family = AF_QIPCRTR;
1246*4882a593Smuzhiyun ipc->us.sq_node = qrtr_local_nid;
1247*4882a593Smuzhiyun ipc->us.sq_port = 0;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun return 0;
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun
/* Socket-family registration entry for AF_QIPCRTR */
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
1257*4882a593Smuzhiyun
qrtr_proto_init(void)1258*4882a593Smuzhiyun static int __init qrtr_proto_init(void)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun int rc;
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun rc = proto_register(&qrtr_proto, 1);
1263*4882a593Smuzhiyun if (rc)
1264*4882a593Smuzhiyun return rc;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun rc = sock_register(&qrtr_family);
1267*4882a593Smuzhiyun if (rc) {
1268*4882a593Smuzhiyun proto_unregister(&qrtr_proto);
1269*4882a593Smuzhiyun return rc;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun qrtr_ns_init();
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun return rc;
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun postcore_initcall(qrtr_proto_init);
1277*4882a593Smuzhiyun
/* Tear down in reverse order of qrtr_proto_init(). */
static void __exit qrtr_proto_fini(void)
{
	qrtr_ns_remove();
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);
1289