/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  An application encountering this "back-pressure" is
 * considered a bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to check whether the port it's about to send to is
 * congested or not.
 */
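
/*
 * A worked example of the layout (a sketch, assuming 4 KiB pages, where
 * RDS_CONG_MAP_PAGE_BITS is PAGE_SIZE * 8 = 32768 bits and the 65536-bit
 * map therefore spans RDS_CONG_MAP_PAGES = 2 pages): port 40000 maps to
 * page index 40000 / 32768 = 1 and bit offset 40000 % 32768 = 7232, which
 * is exactly the arithmetic that rds_cong_set_bit(), rds_cong_clear_bit()
 * and rds_cong_test_bit() perform below.
 */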

/*
 * Interaction with poll is a tad tricky. We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received. Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
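
/*
 * Schematically, the poll side consumes this counter roughly as follows (a
 * sketch of the pattern only, not the real code; see rds_poll() in
 * af_rds.c, and note that rs_cong_track as the per-socket snapshot field
 * is assumed here):
 *
 *	if (rds_cong_updated_since(&rs->rs_cong_track))
 *		mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
 */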
static atomic_t		rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);
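
/*
 * Sockets land on rds_cong_monitor via rds_cong_add_socket() when the
 * application opts in with the RDS_CONG_MONITOR socket option; map updates
 * then latch into their rs_cong_notify mask (see rds_cong_map_updated())
 * and reach userspace as RDS_CMSG_CONG_UPDATE control messages, instead of
 * relying only on the blunt poll wakeup described above.
 */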

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 *  It's sadly ordered under the socket callback lock and the connection lock.
 *  Receive paths can mark ports congested from interrupt context so the
 *  lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

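/*
 * Look up the map for @addr in rds_cong_tree.  If no map exists and @insert
 * is non-NULL, link @insert into the tree at the spot where the search
 * ended.  Returns the existing map, or NULL if there was none (in which
 * case @insert, if given, is now in the tree).  Caller must hold
 * rds_cong_lock.
 */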
static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;

	while (*p) {
		int diff;

		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);

		diff = rds_addr_cmp(addr, &map->m_addr);
		if (diff < 0)
			p = &(*p)->rb_left;
		else if (diff > 0)
			p = &(*p)->rb_right;
		else
			return map;
	}

	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}
	return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections try to allocate
 * these bitmaps in the process of getting pointers to them.  The bitmaps are
 * only ever freed as the module is removed after all connections have been
 * freed.
 */
static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;

	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (!map)
		return NULL;

	map->m_addr = *addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);

	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}

	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (!ret) {
		ret = map;
		map = NULL;
	}

out:
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}

	rdsdebug("map %p for addr %pI6c\n", ret, addr);

	return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(&conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(&conn->c_faddr);

	if (!(conn->c_lcong && conn->c_fcong))
		return -ENOMEM;

	return 0;
}

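/*
 * Ask every connection whose local address owns @map to transmit a fresh
 * copy of the bitmap.  Bit 0 of c_map_queued makes the request idempotent:
 * a map update that is already queued is not queued again.  The actual
 * transmit is deferred to the send worker (see the comment inside).
 */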
void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;

	spin_lock_irqsave(&rds_cong_lock, flags);

	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		struct rds_conn_path *cp = &conn->c_path[0];

		rcu_read_lock();
		if (!test_and_set_bit(0, &conn->c_map_queued) &&
		    !rds_destroy_pending(cp->cp_conn)) {
			rds_stats_inc(s_cong_update_queued);
			/* We cannot inline the call to rds_send_xmit() here
			 * for two reasons (both pertaining to a TCP transport):
			 * 1. When we get here from the receive path, we
			 *    are already holding the sock_lock (held by
			 *    tcp_v4_rcv()). So inlining calls to
			 *    tcp_setsockopt and/or tcp_sendmsg will deadlock
			 *    when it tries to get the sock_lock().
			 * 2. Interrupts are masked so that we can mark the
			 *    port congested from both send and recv paths.
			 *    (See comment around declaration of rds_cong_lock).
			 *    An attempt to get the sock_lock() here will
			 *    therefore trigger warnings.
			 * Defer the xmit to rds_send_worker() instead.
			 */
			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
		}
		rcu_read_unlock();
	}

	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

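/*
 * Called after an incoming congestion update has been applied to @map;
 * @portmask has a bit set for each monitored port whose state changed.
 * Bump the global generation so pollers re-check, wake senders sleeping in
 * rds_cong_wait(), and latch notifications for monitoring sockets whose
 * rs_cong_mask intersects @portmask.
 */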
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI6c\n", map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);

	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;

		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);

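/*
 * Returns nonzero, and refreshes the caller's snapshot in *recent, iff the
 * global congestion generation has advanced since *recent was recorded.
 * This is how each poller notices "some map, somewhere, changed" exactly
 * once per update.
 */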
int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (likely(*recent == gen))
		return 0;
	*recent = gen;
	return 1;
}

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("setting congestion for %pI6c:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	set_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("clearing congestion for %pI6c:%u in map %p\n",
	  &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	clear_bit_le(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	return test_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_add_socket(struct rds_sock *rs)
{
	unsigned long flags;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	if (list_empty(&rs->rs_cong_list))
		list_add(&rs->rs_cong_list, &rds_cong_monitor);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;
	struct rds_cong_map *map;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}

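/*
 * Gate a send to @port.  Returns 0 when the port is uncongested.  For a
 * nonblocking send on a socket with congestion monitoring enabled, arm
 * rs_cong_mask so the next map update generates a notification, re-test in
 * case an update raced with us, and otherwise fail with -ENOBUFS.  Blocking
 * senders sleep on the map's waitqueue until the port's bit clears.
 */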
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;

			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);

			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}

	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

	return wait_event_interruptible(map->m_waitq,
					!rds_cong_test_bit(map, port));
}

void rds_cong_exit(void)
{
	struct rb_node *node;
	struct rds_cong_map *map;
	unsigned long i;

	while ((node = rb_first(&rds_cong_tree))) {
		map = rb_entry(node, struct rds_cong_map, m_rb_node);
		rdsdebug("freeing map %p\n", map);
		rb_erase(&map->m_rb_node, &rds_cong_tree);
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}
}

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
	struct rds_cong_map *map = conn->c_lcong;
	struct rds_message *rm;

	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
	if (!IS_ERR(rm))
		rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

	return rm;
}
429