/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
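/*
 * Usage note (an editorial sketch, not part of the original file): because
 * the parameter is registered with mode 0444 it is read-only at runtime, so
 * it can only be set when the module is loaded and then inspected via sysfs,
 * e.g.:
 *
 *	modprobe rds send_batch_count=2048
 *	cat /sys/module/rds/parameters/send_batch_count
 */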

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}
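
/*
 * Editorial sketch (an assumption for illustration, not code from this file):
 * the wakeup above pairs with waiters that sleep on cp_waitq until the
 * RDS_IN_XMIT owner drops out, along the lines of
 *
 *	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
 *
 * which is why the clear_bit()/smp_mb__after_atomic() ordering above matters:
 * the flag must be visible as clear before we look for waiters.
 */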

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *  - tx queueing is a simple fifo list
 *  - reassembly is optional and easily done by transports per conn
 *  - no per flow rx lookup at all, straight to the socket
 *  - less per-frag memory and wire overhead
 * Con:
 *  - queued acks can be delayed behind large messages
 * Depends:
 *  - small message latency is higher behind queued large messages
 *  - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;
	int same_rm = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (rds_destroy_pending(cp->cp_conn)) {
		release_in_xmit(cp);
		ret = -ENETUNREACH; /* don't requeue send work */
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
	WRITE_ONCE(cp->cp_send_gen, send_gen);

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_path_prepare)
		conn->c_trans->xmit_path_prepare(cp);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		if (!rm) {
			same_rm = 0;
		} else {
			same_rm++;
			if (same_rm >= 4096) {
				rds_stats_inc(s_send_stuck_rm);
				ret = -EAGAIN;
				break;
			}
		}

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
			    (rm->rdma.op_active &&
			     test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_path_complete)
		conn->c_trans->xmit_path_complete(cp);
	release_in_xmit(cp);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
	 * not try and send their newly queued message. We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e., ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		bool raced;

		smp_mb();
		raced = send_gen != READ_ONCE(cp->cp_send_gen);

		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&cp->cp_send_queue)) && !raced) {
			if (batch_count < send_batch_count)
				goto restart;
			rcu_read_lock();
			if (rds_destroy_pending(cp->cp_conn))
				ret = -ENETUNREACH;
			else
				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
			rcu_read_unlock();
		} else if (raced) {
			rds_stats_inc(s_send_lock_queue_raced);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);
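
/*
 * Editorial sketch (hedged, not code from this file): callers such as the
 * send worker typically treat a non-fatal return as "try again shortly",
 * roughly
 *
 *	ret = rds_send_xmit(cp);
 *	if (ret == -EAGAIN || ret == -ENOMEM)
 *		queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
 *
 * while -ENETUNREACH above deliberately signals that the connection is being
 * torn down and the send work should not be requeued.
 */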

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
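
/*
 * Editorial sketch of a transport-supplied is_acked_func (an illustration
 * modelled on how the TCP transport behaves, not code from this file): such
 * a callback only trusts m_ack_seq once RDS_MSG_HAS_ACK_SEQ is set, e.g.
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return (__s64)(rm->m_ack_seq - ack) <= 0;
 *	}
 */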

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
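
/*
 * Userspace context (an editorial, hedged note): applications that request
 * notification via RDS_RDMA_NOTIFY_ME in struct rds_rdma_args later see the
 * notifier queued here delivered as an RDS_CMSG_RDMA_STATUS control message
 * on recvmsg(), carrying the user token and the status value filled in above;
 * the exact ABI is defined in include/uapi/linux/rds.h.
 */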

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
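
/*
 * Editorial sketch (hedged, not from this file): a transport's ACK handler
 * typically funnels into the helper above once it has decoded the peer's
 * cumulative ack, roughly
 *
 *	rds_send_path_drop_acked(cp, be64_to_cpu(hdr->h_ack), NULL);
 *
 * where passing NULL for is_acked selects the default h_sequence comparison
 * performed in rds_send_is_acked().
 */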

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest &&
		    (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
		     dest->sin6_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
		 * Taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is only a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		sock_hold(rds_rs_to_sk(rs));
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int num_sgs,
		       struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;
	bool zcopy_cookie = false;
	struct rds_iov_vector *iov, *tmp_iov;

	if (num_sgs < 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (vct->indx >= vct->len) {
				vct->len += vct->incr;
				tmp_iov =
					krealloc(vct->vec,
						 vct->len *
						 sizeof(struct rds_iov_vector),
						 GFP_KERNEL);
				if (!tmp_iov) {
					vct->len -= vct->incr;
					return -ENOMEM;
				}
				vct->vec = tmp_iov;
			}
			iov = &vct->vec[vct->indx];
			memset(iov, 0, sizeof(struct rds_iov_vector));
			vct->indx++;
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			zcopy_cookie = true;
			fallthrough;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
		return -EINVAL;

	size += num_sgs * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	u32 *cookie;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
	    !rm->data.op_mmp_znotifier)
		return -EINVAL;
	cookie = CMSG_DATA(cmsg);
	rm->data.op_mmp_znotifier->z_cookie = *cookie;
	return 0;
}
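
/*
 * Userspace context (an editorial, hedged sketch): a zerocopy sender attaches
 * the cookie consumed above as an RDS_CMSG_ZCOPY_COOKIE control message and
 * passes MSG_ZEROCOPY to sendmsg(), roughly
 *
 *	uint32_t cookie = 42;
 *	char cbuf[CMSG_SPACE(sizeof(cookie))] = { 0 };
 *	struct cmsghdr *c = (struct cmsghdr *)cbuf;
 *
 *	c->cmsg_level = SOL_RDS;
 *	c->cmsg_type = RDS_CMSG_ZCOPY_COOKIE;
 *	c->cmsg_len = CMSG_LEN(sizeof(cookie));
 *	memcpy(CMSG_DATA(c), &cookie, sizeof(cookie));
 *
 * with cbuf wired up as msg_control; the cookie is later echoed back on the
 * socket error queue once the pinned pages are released (see the RDS
 * zerocopy selftests for the exact flow).
 */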

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr,
			 struct rds_iov_vector_arr *vct)
{
	struct cmsghdr *cmsg;
	int ret = 0, ind = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			if (ind >= vct->indx)
				return -ENOMEM;
			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
			ind++;
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			else if (ret == -ENODEV)
				/* Accommodate the get_mr() case which can fail
				 * if connection isn't established yet.
				 */
				ret = -EAGAIN;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		case RDS_CMSG_ZCOPY_COOKIE:
			ret = rds_cmsg_zcopy(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

static int rds_send_mprds_hash(struct rds_sock *rs,
			       struct rds_connection *conn, int nonblock)
{
	int hash;

	if (conn->c_npaths == 0)
		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
	else
		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
	if (conn->c_npaths == 0 && hash != 0) {
		rds_send_ping(conn, 0);

		/* The underlying connection is not up yet. Need to wait
		 * until it is up to be sure that the non-zero c_path can be
		 * used. But if we are interrupted, we have to use the zero
		 * c_path in case the connection ends up being non-MP capable.
		 */
		if (conn->c_npaths == 0) {
			/* Cannot wait for the connection to be made, so just
			 * use the base c_path.
			 */
			if (nonblock)
				return 0;
			if (wait_event_interruptible(conn->c_hs_waitq,
						     conn->c_npaths != 0))
				hash = 0;
		}
		if (conn->c_npaths == 1)
			hash = 0;
	}
	return hash;
}

static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
{
	struct rds_rdma_args *args;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
			if (cmsg->cmsg_len <
			    CMSG_LEN(sizeof(struct rds_rdma_args)))
				return -EINVAL;
			args = CMSG_DATA(cmsg);
			*rdma_bytes += args->remote_vec.bytes;
		}
	}
	return 0;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;
	struct in6_addr daddr;
	__u32 scope_id = 0;
	size_t total_payload_len = payload_len, rdma_payload_len = 0;
	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
	int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
	int namelen;
	struct rds_iov_vector_arr vct;
	int ind;

	memset(&vct, 0, sizeof(vct));

	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
	vct.incr = 1;

	/* Match Linux UDP behaviour, which in turn mirrors BSD, for error
	 * message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	namelen = msg->msg_namelen;
	if (namelen != 0) {
		if (namelen < sizeof(*usin)) {
			ret = -EINVAL;
			goto out;
		}
		switch (usin->sin_family) {
		case AF_INET:
			if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
			    usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
			    ipv4_is_multicast(usin->sin_addr.s_addr)) {
				ret = -EINVAL;
				goto out;
			}
			ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
			dport = usin->sin_port;
			break;

#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6: {
			int addr_type;

			if (namelen < sizeof(*sin6)) {
				ret = -EINVAL;
				goto out;
			}
			addr_type = ipv6_addr_type(&sin6->sin6_addr);
			if (!(addr_type & IPV6_ADDR_UNICAST)) {
				__be32 addr4;

				if (!(addr_type & IPV6_ADDR_MAPPED)) {
					ret = -EINVAL;
					goto out;
				}

				/* It is a mapped address. Need to do some
				 * sanity checks.
				 */
				addr4 = sin6->sin6_addr.s6_addr32[3];
				if (addr4 == htonl(INADDR_ANY) ||
				    addr4 == htonl(INADDR_BROADCAST) ||
				    ipv4_is_multicast(addr4)) {
					ret = -EINVAL;
					goto out;
				}
			}
			if (addr_type & IPV6_ADDR_LINKLOCAL) {
				if (sin6->sin6_scope_id == 0) {
					ret = -EINVAL;
					goto out;
				}
				scope_id = sin6->sin6_scope_id;
			}

			daddr = sin6->sin6_addr;
			dport = sin6->sin6_port;
			break;
		}
#endif

		default:
			ret = -EINVAL;
			goto out;
		}
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		scope_id = rs->rs_bound_scope_id;
		release_sock(sk);
	}

	lock_sock(sk);
	if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
		release_sock(sk);
		ret = -ENOTCONN;
		goto out;
	} else if (namelen != 0) {
		/* Cannot send to an IPv4 address using an IPv6 source
		 * address and cannot send to an IPv6 address using an
		 * IPv4 source address.
		 */
		if (ipv6_addr_v4mapped(&daddr) ^
		    ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
			release_sock(sk);
			ret = -EOPNOTSUPP;
			goto out;
		}
		/* If the socket is already bound to a link local address,
		 * it can only send to peers on the same link.  But allow
		 * communicating between link local and non-link local addresses.
		 */
		if (scope_id != rs->rs_bound_scope_id) {
			if (!scope_id) {
				scope_id = rs->rs_bound_scope_id;
			} else if (rs->rs_bound_scope_id) {
				release_sock(sk);
				ret = -EINVAL;
				goto out;
			}
		}
	}
	release_sock(sk);

	ret = rds_rdma_bytes(msg, &rdma_payload_len);
	if (ret)
		goto out;

	total_payload_len += rdma_payload_len;
	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (zcopy) {
		if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
	}
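
	/* Illustrative note (not from the original source): zero-copy sends
	 * are only taken here when the socket has SOCK_ZEROCOPY set and the
	 * transport is RDS/TCP.  A hypothetical userspace sketch, with
	 * placeholder names, might look like:
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	 *	sendmsg(fd, &msg, MSG_ZEROCOPY);
	 *
	 * The pinned user pages are released asynchronously once the
	 * transport is done with them.
	 */
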
	/* size of rm including all sgs */
	ret = rds_rm_size(msg, num_sgs, &vct);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
		if (IS_ERR(rm->data.op_sg)) {
			ret = PTR_ERR(rm->data.op_sg);
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
	    rs->rs_tos == rs->rs_conn->c_tos) {
		conn = rs->rs_conn;
	} else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						&rs->rs_bound_addr, &daddr,
						rs->rs_transport, rs->rs_tos,
						sock->sk->sk_allocation,
						scope_id);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	if (conn->c_trans->t_mp_capable)
		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
	else
		cpath = &conn->c_path[0];

	rm->m_conn_path = cpath;

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
	if (ret) {
		/* Trigger connection so that it's ready for the next retry */
		if (ret == -EAGAIN)
			rds_conn_connect_if_down(conn);
		goto out;
	}

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rds_destroy_pending(conn)) {
		ret = -EAGAIN;
		goto out;
	}

	if (rds_conn_path_down(cpath))
		rds_check_all_paths(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN) {
		ret = 0;
		rcu_read_lock();
		if (rds_destroy_pending(cpath->cp_conn))
			ret = -ENETUNREACH;
		else
			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
		rcu_read_unlock();
	}
	if (ret)
		goto out;
	rds_message_put(rm);

	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	return payload_len;

out:
	for (ind = 0; ind < vct.indx; ind++)
		kfree(vct.vec[ind].iov);
	kfree(vct.vec);

	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again.
	 * (An illustrative userspace sketch of RDMA_MAP usage follows this
	 * function.)
	 */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
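
/* Illustrative note (not part of the original file): a hypothetical userspace
 * sketch of attaching an RDS_CMSG_RDMA_MAP control message to sendmsg(), which
 * is what makes allocated_mr above non-zero.  Buffer names and field values
 * are placeholders:
 *
 *	struct rds_get_mr_args mr_args = {
 *		.vec		= { .addr = (uint64_t)buf, .bytes = buf_len },
 *		.cookie_addr	= (uint64_t)&cookie,
 *		.flags		= 0,
 *	};
 *	char cbuf[CMSG_SPACE(sizeof(mr_args))];
 *	struct msghdr mh = {
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *		// plus msg_name/msg_iov as usual for an RDS send
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
 *
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type	 = RDS_CMSG_RDMA_MAP;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(mr_args));
 *	memcpy(CMSG_DATA(cmsg), &mr_args, sizeof(mr_args));
 *	sendmsg(fd, &mh, 0);
 */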

/*
 * Send out a probe.  Can be shared by rds_send_ping,
 * rds_send_pong, rds_send_hb.
 * rds_send_hb should use h_flags
 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
 * or
 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
 */
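/* Illustrative note (not part of the original file): a hypothetical heartbeat
 * sender built on rds_send_probe(), following the flag usage described above.
 * The function name and the "response" parameter are assumptions made purely
 * for illustration:
 *
 *	static int rds_send_hb(struct rds_conn_path *cp, int response)
 *	{
 *		u8 flags = response ? RDS_FLAG_HB_PONG : RDS_FLAG_HB_PING;
 *
 *		return rds_send_probe(cp, 0, 0, flags | RDS_FLAG_ACK_REQUIRED);
 *	}
 */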
static int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
	       __be16 dport, u8 h_flags)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
				    cp->cp_next_tx_seq);
	rm->m_inc.i_hdr.h_flags |= h_flags;
	cp->cp_next_tx_seq++;

	if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
	    cp->cp_conn->c_trans->t_mp_capable) {
		u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
		u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);

		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_NPATHS, &npaths,
					  sizeof(npaths));
		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_GEN_NUM,
					  &my_gen_num,
					  sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
	rcu_read_unlock();

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}

int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	return rds_send_probe(cp, 0, dport, 0);
}

void
rds_send_ping(struct rds_connection *conn, int cp_index)
{
	unsigned long flags;
	struct rds_conn_path *cp = &conn->c_path[cp_index];

	spin_lock_irqsave(&cp->cp_lock, flags);
	if (conn->c_ping_triggered) {
		spin_unlock_irqrestore(&cp->cp_lock, flags);
		return;
	}
	conn->c_ping_triggered = 1;
	spin_unlock_irqrestore(&cp->cp_lock, flags);
	rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
}
EXPORT_SYMBOL_GPL(rds_send_ping);