// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/net.h>
14*4882a593Smuzhiyun #include <linux/rcupdate.h>
15*4882a593Smuzhiyun #include <linux/sched/signal.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include <net/sock.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include "smc.h"
20*4882a593Smuzhiyun #include "smc_core.h"
21*4882a593Smuzhiyun #include "smc_cdc.h"
22*4882a593Smuzhiyun #include "smc_tx.h" /* smc_tx_consumer_update() */
23*4882a593Smuzhiyun #include "smc_rx.h"
24*4882a593Smuzhiyun
/* callback implementation to wakeup consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 * Installed as sk->sk_data_ready in smc_rx_init().
 */
static void smc_rx_wake_up(struct sock *sk)
{
	struct socket_wq *wq;

	/* derived from sock_def_readable() */
	/* called already in smc_listen_work() */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	/* fully shut down or closed socket: additionally signal hangup */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    (sk->sk_state == SMC_CLOSED))
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	rcu_read_unlock();
}
45*4882a593Smuzhiyun
/* Update consumer cursor
 * @smc  smc socket whose connection cursor is updated
 * @cons consumer cursor (local copy; advanced by @len before publishing)
 * @len  number of Bytes consumed
 * Returns:
 * 1 if we should end our receive, 0 otherwise
 */
static int smc_rx_update_consumer(struct smc_sock *smc,
				  union smc_host_cursor cons, size_t len)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool force = false;
	int diff, rc = 0;

	smc_curs_add(conn->rmb_desc->len, &cons, len);

	/* did we process urgent data? */
	if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
		diff = smc_curs_comp(conn->rmb_desc->len, &cons,
				     &conn->urg_curs);
		if (sock_flag(sk, SOCK_URGINLINE)) {
			if (diff == 0) {
				/* cursor reached the urgent byte: stop this
				 * receive so the urgent byte stays the last
				 * byte delivered inline
				 */
				force = true;
				rc = 1;
				conn->urg_state = SMC_URG_READ;
			}
		} else {
			if (diff == 1) {
				/* skip urgent byte */
				force = true;
				smc_curs_add(conn->rmb_desc->len, &cons, 1);
				conn->urg_rx_skip_pend = false;
			} else if (diff < -1)
				/* we read past urgent byte */
				conn->urg_state = SMC_URG_READ;
		}
	}

	/* publish the advanced cursor to the connection state */
	smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);

	/* send consumer cursor update if required */
	/* similar to advertising new TCP rcv_wnd if required */
	smc_tx_consumer_update(conn, force);

	return rc;
}
93*4882a593Smuzhiyun
/* Advance the local consumer cursor by @len bytes, starting from the
 * connection's currently published consumer cursor.
 */
static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
{
	union smc_host_cursor consumer;

	smc_curs_copy(&consumer, &smc->conn.local_tx_ctrl.cons, &smc->conn);
	smc_rx_update_consumer(smc, consumer, len);
}
102*4882a593Smuzhiyun
/* per-splice context handed from smc_rx_splice() to
 * smc_rx_pipe_buf_release() via pipe_buffer::private
 */
struct smc_spd_priv {
	struct smc_sock *smc;	/* socket the spliced bytes belong to */
	size_t len;		/* number of bytes spliced into the pipe */
};
107*4882a593Smuzhiyun
/* pipe_buf_operations::release callback: the spliced data has left the
 * pipe, so account for it - advance the consumer cursor (freeing RMBE
 * space towards the peer) and drop the page/socket references taken in
 * smc_rx_splice().
 */
static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
	struct smc_sock *smc = priv->smc;
	struct smc_connection *conn;
	struct sock *sk = &smc->sk;

	/* no cursor update once the connection is (being) closed */
	if (sk->sk_state == SMC_CLOSED ||
	    sk->sk_state == SMC_PEERFINCLOSEWAIT ||
	    sk->sk_state == SMC_APPFINCLOSEWAIT)
		goto out;
	conn = &smc->conn;
	lock_sock(sk);
	smc_rx_update_cons(smc, priv->len);
	release_sock(sk);
	/* wake readers once the last pending spliced byte is consumed */
	if (atomic_sub_and_test(priv->len, &conn->splice_pending))
		smc_rx_wake_up(sk);
out:
	kfree(priv);
	put_page(buf->page);
	sock_put(sk);	/* drop hold taken in smc_rx_splice() */
}
131*4882a593Smuzhiyun
/* operations for pipe buffers produced by smc_rx_splice() */
static const struct pipe_buf_operations smc_pipe_ops = {
	.release = smc_rx_pipe_buf_release,
	.get = generic_pipe_buf_get
};
136*4882a593Smuzhiyun
/* splice_pipe_desc::spd_release callback: drop the reference on page @i
 * of the descriptor (taken for splice_to_pipe())
 */
static void smc_rx_spd_release(struct splice_pipe_desc *spd,
			       unsigned int i)
{
	put_page(spd->pages[i]);
}
142*4882a593Smuzhiyun
/* Splice @len bytes starting at @src (inside the RMB) into @pipe.
 * Returns the number of bytes actually spliced, or a negative errno.
 * Consumed RMBE space is only released later, when the pipe buffer is
 * released (smc_rx_pipe_buf_release()).
 */
static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
			 struct smc_sock *smc)
{
	struct splice_pipe_desc spd;
	struct partial_page partial;
	struct smc_spd_priv *priv;
	int bytes;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->len = len;
	priv->smc = smc;
	/* offset of @src within the receive buffer */
	partial.offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
	partial.len = len;
	partial.private = (unsigned long)priv;

	spd.nr_pages_max = 1;
	spd.nr_pages = 1;
	spd.pages = &smc->conn.rmb_desc->pages;
	spd.partial = &partial;
	spd.ops = &smc_pipe_ops;
	spd.spd_release = smc_rx_spd_release;

	bytes = splice_to_pipe(pipe, &spd);
	if (bytes > 0) {
		/* keep socket and RMB page alive until the pipe buffer is
		 * released; both are dropped in smc_rx_pipe_buf_release()
		 */
		sock_hold(&smc->sk);
		get_page(smc->conn.rmb_desc->pages);
		atomic_add(bytes, &smc->conn.splice_pending);
	}

	return bytes;
}
176*4882a593Smuzhiyun
smc_rx_data_available_and_no_splice_pend(struct smc_connection * conn)177*4882a593Smuzhiyun static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun return atomic_read(&conn->bytes_to_rcv) &&
180*4882a593Smuzhiyun !atomic_read(&conn->splice_pending);
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun
/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
 * @smc    smc socket
 * @timo   pointer to max seconds to wait, pointer to value 0 for no timeout
 * @fcrit  add'l criterion to evaluate as function pointer
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo,
		int (*fcrit)(struct smc_connection *conn))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct smc_cdc_conn_state_flags *cflags =
		&conn->local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;
	int rc;

	/* fast path: criterion already satisfied, no need to sleep */
	if (fcrit(conn))
		return 1;
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	/* also woken on error, peer abort, rx shutdown or killed link group */
	rc = sk_wait_event(sk, timeo,
			   sk->sk_err ||
			   cflags->peer_conn_abort ||
			   sk->sk_shutdown & RCV_SHUTDOWN ||
			   conn->killed ||
			   fcrit(conn),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	return rc;
}
216*4882a593Smuzhiyun
/* receive the single urgent byte for MSG_OOB (non-SOCK_URGINLINE case).
 * Returns the number of bytes delivered (0 or 1), 0 on closed/shutdown
 * socket, or a negative errno.
 */
static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
			   int flags)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (sock_flag(sk, SOCK_URGINLINE) ||
	    !(conn->urg_state == SMC_URG_VALID) ||
	    conn->urg_state == SMC_URG_READ)
		return -EINVAL;

	/* NOTE(review): urg_state was SMC_URG_VALID just above; it is
	 * re-checked here, presumably because the CDC receive path may
	 * change it asynchronously - confirm before simplifying.
	 */
	if (conn->urg_state == SMC_URG_VALID) {
		if (!(flags & MSG_PEEK))
			smc->conn.urg_state = SMC_URG_READ;
		msg->msg_flags |= MSG_OOB;
		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
			len = 1;
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			if (smc_curs_diff(conn->rmb_desc->len, &cons,
					  &conn->urg_curs) > 1)
				conn->urg_rx_skip_pend = true;
			/* Urgent Byte was already accounted for, but trigger
			 * skipping the urgent byte in non-inline case
			 */
			if (!(flags & MSG_PEEK))
				smc_rx_update_consumer(smc, cons, 0);
		} else {
			msg->msg_flags |= MSG_TRUNC;
		}

		return rc ? -EFAULT : len;
	}

	if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
		return 0;

	return -EAGAIN;
}
259*4882a593Smuzhiyun
smc_rx_recvmsg_data_available(struct smc_sock * smc)260*4882a593Smuzhiyun static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun struct smc_connection *conn = &smc->conn;
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun if (smc_rx_data_available(conn))
265*4882a593Smuzhiyun return true;
266*4882a593Smuzhiyun else if (conn->urg_state == SMC_URG_VALID)
267*4882a593Smuzhiyun /* we received a single urgent Byte - skip */
268*4882a593Smuzhiyun smc_rx_update_cons(smc, 0);
269*4882a593Smuzhiyun return false;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun
/* smc_rx_recvmsg - receive data from RMBE
 * @msg:	copy data to receive buffer
 * @pipe:	copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 * Returns number of bytes delivered or a negative errno.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
		   struct pipe_inode_info *pipe, size_t len, int flags)
{
	size_t copylen, read_done = 0, read_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	int (*func)(struct smc_connection *conn);
	union smc_host_cursor cons;
	int readable, chunk;
	char *rcvbuf_base;
	struct sock *sk;
	int splbytes;
	long timeo;
	int target;		/* Read at least these many bytes */
	int rc;

	if (unlikely(flags & MSG_ERRQUEUE))
		return -EINVAL; /* future work for sk.sk_family == AF_SMC */

	sk = &smc->sk;
	if (sk->sk_state == SMC_LISTEN)
		return -ENOTCONN;
	if (flags & MSG_OOB)
		return smc_rx_recv_urg(smc, msg, len, flags);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
	rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

	do { /* while (read_remaining) */
		/* splice() delivers at most one chunk per call */
		if (read_done >= target || (pipe && read_done))
			break;

		if (conn->killed)
			break;

		if (smc_rx_recvmsg_data_available(smc))
			goto copy;

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			/* smc_cdc_msg_recv_action() could have run after
			 * above smc_rx_recvmsg_data_available()
			 */
			if (smc_rx_recvmsg_data_available(smc))
				goto copy;
			break;
		}

		/* no data yet: decide whether to bail out or wait */
		if (read_done) {
			if (sk->sk_err ||
			    sk->sk_state == SMC_CLOSED ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				read_done = sock_error(sk);
				break;
			}
			if (sk->sk_state == SMC_CLOSED) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					read_done = -ENOTCONN;
					break;
				}
				break;
			}
			if (!timeo)
				return -EAGAIN;
			if (signal_pending(current)) {
				read_done = sock_intr_errno(timeo);
				break;
			}
		}

		if (!smc_rx_data_available(conn)) {
			smc_rx_wait(smc, &timeo, smc_rx_data_available);
			continue;
		}

copy:
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after waiting on data above */
		readable = atomic_read(&conn->bytes_to_rcv);
		splbytes = atomic_read(&conn->splice_pending);
		if (!readable || (msg && splbytes)) {
			if (splbytes)
				func = smc_rx_data_available_and_no_splice_pend;
			else
				func = smc_rx_data_available;
			smc_rx_wait(smc, &timeo, func);
			continue;
		}

		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
		/* subsequent splice() calls pick up where previous left */
		if (splbytes)
			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
		if (conn->urg_state == SMC_URG_VALID &&
		    sock_flag(&smc->sk, SOCK_URGINLINE) &&
		    readable > 1)
			readable--;	/* always stop at urgent Byte */
		/* not more than what user space asked for */
		copylen = min_t(size_t, read_remaining, readable);
		/* determine chunks where to read from rcvbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
				  cons.count);
		chunk_len_sum = chunk_len;
		chunk_off = cons.count;
		smc_rmb_sync_sg_for_cpu(conn);
		/* at most two chunks: copy may wrap around the ring buffer */
		for (chunk = 0; chunk < 2; chunk++) {
			if (!(flags & MSG_TRUNC)) {
				if (msg) {
					rc = memcpy_to_msg(msg, rcvbuf_base +
							   chunk_off,
							   chunk_len);
				} else {
					rc = smc_rx_splice(pipe, rcvbuf_base +
							   chunk_off, chunk_len,
							   smc);
				}
				if (rc < 0) {
					if (!read_done)
						read_done = -EFAULT;
					smc_rmb_sync_sg_for_device(conn);
					goto out;
				}
			}
			read_remaining -= chunk_len;
			read_done += chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in recv ring buffer */
		}
		smc_rmb_sync_sg_for_device(conn);

		/* update cursors */
		if (!(flags & MSG_PEEK)) {
			/* increased in recv tasklet smc_cdc_msg_rcv() */
			smp_mb__before_atomic();
			atomic_sub(copylen, &conn->bytes_to_rcv);
			/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
			smp_mb__after_atomic();
			if (msg && smc_rx_update_consumer(smc, cons, copylen))
				goto out;
		}
	} while (read_remaining);
out:
	return read_done;
}
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun /* Initialize receive properties on connection establishment. NB: not __init! */
smc_rx_init(struct smc_sock * smc)439*4882a593Smuzhiyun void smc_rx_init(struct smc_sock *smc)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun smc->sk.sk_data_ready = smc_rx_wake_up;
442*4882a593Smuzhiyun atomic_set(&smc->conn.splice_pending, 0);
443*4882a593Smuzhiyun smc->conn.urg_state = SMC_URG_READ;
444*4882a593Smuzhiyun }
445