// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}

static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	csk->sk_error_report(csk);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.msgs;
	psock->saved_rx_bytes = psock->strp.stats.bytes;
}

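/* TX mux lock held. Fold this psock's transmit counters into the mux-wide
 * stats and snapshot them so the next update only adds the delta.
 */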
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue -- either the KCM is new or
 * has become unblocked after being blocked on a full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, true);
}

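/* skb destructor for skbs charged to a KCM socket's receive queue. Uncharge
 * the memory and, if the socket has drained below sk_rcvlowat and is not
 * currently receiving or waiting, mark it ready to receive again.
 */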
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}

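/* Charge an skb to a KCM socket's receive memory and queue it, waking any
 * reader. Returns -ENOMEM if the receive buffer is already full, or -ENOBUFS
 * if memory cannot be scheduled; on failure the caller keeps ownership of
 * the skb.
 */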
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, false);

	psock->rx_kcm = kcm;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, psock);

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, NULL);

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}
	spin_unlock_bh(&mux->rx_lock);
}

/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}

/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}

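/* strparser parse_msg callback. Run the attached BPF program on the skb to
 * determine the length of the next message in the stream (or an error).
 */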
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;
	int res;

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}

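/* strparser read_sock_done callback. Receive processing on the lower socket
 * is finished for now, so release the KCM socket reserved for this psock.
 */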
static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}

static void psock_state_change(struct sock *sk)
{
	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
	 * since application will normally not poll with EPOLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}

static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the psock is reserved, meaning a KCM socket is waiting
	 * to send on it.
	 */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if a psock was reserved for this
	 * kcm socket in the meantime (via psock_now_avail on the unreserve
	 * path).
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}

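/* Account a transmit retry on the mux. A retry happens when a reserved psock
 * fails mid-message and the message will be resent on another psock.
 */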
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!skb_frag_size(frag))) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      skb_frag_page(frag),
					      skb_frag_off(frag) + frag_offset,
					      skb_frag_size(frag) - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < skb_frag_size(frag)) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));
out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}

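/* Workqueue handler for deferred transmit on a KCM socket. Runs when a psock
 * becomes available or writable, or after an asynchronous tx abort.
 */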
static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}

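/* Flush any messages held back by MSG_BATCH. Called with kcm sock lock held. */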
static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}

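/* Zero-copy transmit of a page onto a KCM socket, appending to a previously
 * opened message if one exists. MSG_MORE (or MSG_SENDPAGE_NOTLAST from
 * splice) keeps the message open; otherwise the message is completed and
 * queued for transmission.
 */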
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)

{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;

				/* Retry the allocation after waiting for
				 * memory, matching kcm_sendmsg; otherwise
				 * this loop could never terminate.
				 */
				tskb = alloc_skb(0, sk->sk_allocation);
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			/* Retry the allocation after waiting for memory */
			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, i, page, offset, size);
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}

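/* Copy user data into a new or previously opened message. For SOCK_DGRAM a
 * sendmsg completes a message unless MSG_MORE is set; for SOCK_SEQPACKET a
 * message is only completed by MSG_EOR. Completed messages are queued on the
 * socket write queue and transmitted unless MSG_BATCH holds them back.
 */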
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		if (head) {
			kcm->seq_skb = head;
			kcm_tx_msg(head)->last_skb = skb;
		}
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (copied && sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		goto partial_message;
	}

	if (head != kcm->seq_skb)
		kfree_skb(head);

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}

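/* Receive one parsed message, or part of one, from the receive queue. A short
 * read of a SOCK_DGRAM message truncates it (MSG_TRUNC); MSG_EOR is set in
 * the flags once a message has been consumed in full.
 */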
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	struct strp_msg *stm;
	int copied = 0;
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < stm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			stm->offset += copied;
			stm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
		}
	}

out:
	skb_free_datagram(sk, skb);
	return copied ? : err;
}

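/* Splice message data from a KCM socket into a pipe without copying through
 * user space. Consumed bytes are accounted against the current message.
 */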
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct strp_msg *stm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	stm->offset += copied;
	stm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	skb_free_datagram(sk, skb);
	return copied;

err_out:
	skb_free_datagram(sk, skb);
	return err;
}

/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}

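/* Handle SOL_KCM socket options. Currently only KCM_RECV_DISABLE is
 * supported, which toggles whether this KCM socket receives messages.
 */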
static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}

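/* Report SOL_KCM socket options back to user space; the only readable option
 * is the current KCM_RECV_DISABLE state.
 */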
kcm_getsockopt(struct socket * sock,int level,int optname,char __user * optval,int __user * optlen)1263*4882a593Smuzhiyun static int kcm_getsockopt(struct socket *sock, int level, int optname,
1264*4882a593Smuzhiyun char __user *optval, int __user *optlen)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun struct kcm_sock *kcm = kcm_sk(sock->sk);
1267*4882a593Smuzhiyun int val, len;
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun if (level != SOL_KCM)
1270*4882a593Smuzhiyun return -ENOPROTOOPT;
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun if (get_user(len, optlen))
1273*4882a593Smuzhiyun return -EFAULT;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun len = min_t(unsigned int, len, sizeof(int));
1276*4882a593Smuzhiyun if (len < 0)
1277*4882a593Smuzhiyun return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
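
/* Example (userspace sketch, not part of this file): toggling receive on a
 * KCM socket via the options above. Assumes SOL_KCM and KCM_RECV_DISABLE
 * come from <linux/kcm.h>/<linux/socket.h> and "kcmfd" is an open KCM socket.
 *
 *	#include <sys/socket.h>
 *	#include <linux/kcm.h>
 *
 *	int on = 1, cur;
 *	socklen_t len = sizeof(cur);
 *
 *	// Park incoming messages on other sockets of the mux
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 *
 *	// Read the option back; cur should now be nonzero
 *	getsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &cur, &len);
 */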

static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * EPOLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

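	/* Find the lowest unused index for the new socket: the list is kept
	 * sorted by index, so walk it until a gap appears (e.g. with indices
	 * 0, 1 and 3 in use, the new socket gets index 2).
	 */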
	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}

static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	static const struct strp_callbacks cb = {
		.rcv_msg = kcm_rcv_strparser,
		.parse_msg = kcm_parse_func_strparser,
		.read_sock_done = kcm_read_sock_done,
	};
	int err = 0;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	lock_sock(csk);

	/* Only allow TCP sockets to be attached for now */
	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
	    csk->sk_protocol != IPPROTO_TCP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Don't allow listeners or closed sockets */
	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
		err = -EOPNOTSUPP;
		goto out;
	}

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock) {
		err = -ENOMEM;
		goto out;
	}

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	write_lock_bh(&csk->sk_callback_lock);

	/* Check if sk_user_data is already in use by KCM or someone else.
	 * Must be done under lock to prevent race conditions.
	 */
	if (csk->sk_user_data) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		err = -EALREADY;
		goto out;
	}

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		goto out;
	}

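	/* Save the TCP socket's original callbacks so kcm_unattach() can
	 * restore them, then redirect data-ready, write-space and
	 * state-change notifications into KCM.
	 */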
	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;

	write_unlock_bh(&csk->sk_callback_lock);

	sock_hold(csk);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

out:
	release_sock(csk);

	return err;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	fput(csock->file);
	return err;
}
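
/* Example (userspace sketch, not part of this file): attaching a connected
 * TCP socket to a KCM socket. Assumes "kcmfd" is a KCM socket, "tcpfd" is a
 * connected TCP socket, and "bpf_fd" is a loaded BPF_PROG_TYPE_SOCKET_FILTER
 * program that returns the length of each message in the byte stream.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kcm.h>
 *
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,
 *		.bpf_fd = bpf_fd,
 *	};
 *
 *	if (ioctl(kcmfd, SIOCKCMATTACH, &attach) < 0)
 *		perror("SIOCKCMATTACH");
 */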

static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths; we need the kcm lock, which cannot be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in the window the mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}

static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	fput(csock->file);
	return err;
}
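
/* Example (userspace sketch, not part of this file): detaching the TCP
 * socket attached above, using the same "kcmfd" and "tcpfd" descriptors.
 *
 *	struct kcm_unattach unattach = { .fd = tcpfd };
 *
 *	if (ioctl(kcmfd, SIOCKCMUNATTACH, &unattach) < 0)
 *		perror("SIOCKCMUNATTACH");
 */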

static struct proto kcm_proto = {
	.name = "KCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};

/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
	struct socket *newsock;
	struct sock *newsk;

	newsock = sock_alloc();
	if (!newsock)
		return ERR_PTR(-ENFILE);

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, false);
	if (!newsk) {
		sock_release(newsock);
		return ERR_PTR(-ENOMEM);
	}
	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}

static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct file *file;

		info.fd = get_unused_fd_flags(0);
		if (unlikely(info.fd < 0))
			return info.fd;

		file = kcm_clone(sock);
		if (IS_ERR(file)) {
			put_unused_fd(info.fd);
			return PTR_ERR(file);
		}
		if (copy_to_user((void __user *)arg, &info,
				 sizeof(info))) {
			put_unused_fd(info.fd);
			fput(file);
			return -EFAULT;
		}
		fd_install(info.fd, file);
		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
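
/* Example (userspace sketch, not part of this file): cloning a KCM socket so
 * multiple threads can send and receive messages on the same mux. The kernel
 * fills in the new descriptor.
 *
 *	struct kcm_clone clone_info;
 *
 *	if (ioctl(kcmfd, SIOCKCMCLONE, &clone_info) < 0)
 *		perror("SIOCKCMCLONE");
 *	else
 *		worker_fd = clone_info.fd;	// new socket, same mux
 */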

static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
	    struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}

static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}

static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		/* paired with lockless reads in kcm_rfree() */
		WRITE_ONCE(kcm->rx_wait, false);
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}

/* Called by kcm_release to close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	lock_sock(sk);
	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list; after this point there should be no
		 * way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}

static const struct proto_ops kcm_dgram_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
};

static const struct proto_ops kcm_seqpacket_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
	.splice_read = kcm_splice_read,
};

/* Create op for the KCM protocol family: allocate a KCM socket and its mux */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}
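
/* Example (userspace sketch, not part of this file): creating a KCM socket.
 * Message boundaries come from the attached BPF parser, so SOCK_SEQPACKET
 * (or SOCK_DGRAM) is used with the single supported protocol. Assumes AF_KCM
 * and KCMPROTO_CONNECTED are visible via <sys/socket.h> and <linux/kcm.h>.
 *
 *	#include <sys/socket.h>
 *	#include <linux/kcm.h>
 *
 *	int kcmfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *	if (kcmfd < 0)
 *		perror("socket(AF_KCM)");
 */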

static const struct net_proto_family kcm_family_ops = {
	.family = PF_KCM,
	.create = kcm_create,
	.owner = THIS_MODULE,
};

static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}

static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));
}

static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id   = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};

static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}

static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	sock_unregister(PF_KCM);
	unregister_pernet_device(&kcm_net_ops);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}

module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);