// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
 */
#include <rdma/ib_verbs.h>
#include <rdma/rdma_counter.h>

#include "core_priv.h"
#include "restrack.h"

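/* QP attributes that auto mode may use when grouping QPs onto counters. */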
#define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID)

static int __counter_set_mode(struct rdma_counter_mode *curr,
			      enum rdma_nl_counter_mode new_mode,
			      enum rdma_nl_counter_mask new_mask)
{
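	/*
	 * Auto mode can only be entered from NONE, and its mask must not
	 * request match fields beyond ALL_AUTO_MODE_MASKS.
	 */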
	if ((new_mode == RDMA_COUNTER_MODE_AUTO) &&
	    ((new_mask & (~ALL_AUTO_MODE_MASKS)) ||
	     (curr->mode != RDMA_COUNTER_MODE_NONE)))
		return -EINVAL;

	curr->mode = new_mode;
	curr->mask = new_mask;
	return 0;
}

/**
 * rdma_counter_set_auto_mode() - Turn on/off per-port auto mode
 * @dev: Device to operate
 * @port: Port to use
 * @on: Whether to turn auto mode on or off
 * @mask: Mask to configure
 *
 * When @on is true, @mask must be set. When @on is false, the port falls
 * back to manual mode if any counters remain, so that the user is still
 * able to access them.
 */
int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
			       bool on, enum rdma_nl_counter_mask mask)
{
	struct rdma_port_counter *port_counter;
	int ret;

	port_counter = &dev->port_data[port].port_counter;
	if (!port_counter->hstats)
		return -EOPNOTSUPP;

	mutex_lock(&port_counter->lock);
	if (on) {
		ret = __counter_set_mode(&port_counter->mode,
					 RDMA_COUNTER_MODE_AUTO, mask);
	} else {
		if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
			ret = -EINVAL;
			goto out;
		}

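		/*
		 * Leaving auto mode: if counters are still allocated, fall
		 * back to manual mode so they stay accessible; otherwise
		 * disable counting entirely.
		 */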
		if (port_counter->num_counters)
			ret = __counter_set_mode(&port_counter->mode,
						 RDMA_COUNTER_MODE_MANUAL, 0);
		else
			ret = __counter_set_mode(&port_counter->mode,
						 RDMA_COUNTER_MODE_NONE, 0);
	}

out:
	mutex_unlock(&port_counter->lock);
	return ret;
}

static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
					       enum rdma_nl_counter_mode mode)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter;
	int ret;

	if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats)
		return NULL;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return NULL;

	counter->device    = dev;
	counter->port      = port;

	rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER);
	counter->stats = dev->ops.counter_alloc_stats(counter);
	if (!counter->stats)
		goto err_stats;

	port_counter = &dev->port_data[port].port_counter;
	mutex_lock(&port_counter->lock);
	if (mode == RDMA_COUNTER_MODE_MANUAL) {
		ret = __counter_set_mode(&port_counter->mode,
					 RDMA_COUNTER_MODE_MANUAL, 0);
		if (ret)
			goto err_mode;
	}

	port_counter->num_counters++;
	mutex_unlock(&port_counter->lock);

	counter->mode.mode = mode;
	kref_init(&counter->kref);
	mutex_init(&counter->lock);

	return counter;

err_mode:
	mutex_unlock(&port_counter->lock);
	kfree(counter->stats);
err_stats:
	rdma_restrack_put(&counter->res);
	kfree(counter);
	return NULL;
}

static void rdma_counter_free(struct rdma_counter *counter)
{
	struct rdma_port_counter *port_counter;

	port_counter = &counter->device->port_data[counter->port].port_counter;
	mutex_lock(&port_counter->lock);
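	/* The last counter leaving manual mode returns the port to NONE. */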
	port_counter->num_counters--;
	if (!port_counter->num_counters &&
	    (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL))
		__counter_set_mode(&port_counter->mode, RDMA_COUNTER_MODE_NONE,
				   0);

	mutex_unlock(&port_counter->lock);

	rdma_restrack_del(&counter->res);
	kfree(counter->stats);
	kfree(counter);
}

static void auto_mode_init_counter(struct rdma_counter *counter,
				   const struct ib_qp *qp,
				   enum rdma_nl_counter_mask new_mask)
{
	struct auto_mode_param *param = &counter->mode.param;

	counter->mode.mode = RDMA_COUNTER_MODE_AUTO;
	counter->mode.mask = new_mask;

	if (new_mask & RDMA_COUNTER_MASK_QP_TYPE)
		param->qp_type = qp->qp_type;
}

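/* Check whether @qp may share @counter under the fields in @auto_mask. */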
static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
			    enum rdma_nl_counter_mask auto_mask)
{
	struct auto_mode_param *param = &counter->mode.param;
	bool match = true;

	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
		match &= (param->qp_type == qp->qp_type);

	if (auto_mask & RDMA_COUNTER_MASK_PID)
		match &= (task_pid_nr(counter->res.task) ==
			  task_pid_nr(qp->res.task));

	return match;
}

static int __rdma_counter_bind_qp(struct rdma_counter *counter,
				  struct ib_qp *qp)
{
	int ret;

	if (qp->counter)
		return -EINVAL;

	if (!qp->device->ops.counter_bind_qp)
		return -EOPNOTSUPP;

	mutex_lock(&counter->lock);
	ret = qp->device->ops.counter_bind_qp(counter, qp);
	mutex_unlock(&counter->lock);

	return ret;
}

static int __rdma_counter_unbind_qp(struct ib_qp *qp)
{
	struct rdma_counter *counter = qp->counter;
	int ret;

	if (!qp->device->ops.counter_unbind_qp)
		return -EOPNOTSUPP;

	mutex_lock(&counter->lock);
	ret = qp->device->ops.counter_unbind_qp(qp);
	mutex_unlock(&counter->lock);

	return ret;
}

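/*
 * Fold a departing counter's totals into the port's history stats so
 * they are not lost when the counter is freed.
 */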
static void counter_history_stat_update(struct rdma_counter *counter)
{
	struct ib_device *dev = counter->device;
	struct rdma_port_counter *port_counter;
	int i;

	port_counter = &dev->port_data[counter->port].port_counter;
	if (!port_counter->hstats)
		return;

	rdma_counter_query_stats(counter);

	for (i = 0; i < counter->stats->num_counters; i++)
		port_counter->hstats->value[i] += counter->stats->value[i];
}

/**
 * rdma_get_counter_auto_mode() - Find the counter that @qp should be
 *   bound to in auto mode
 * @qp: The QP to be bound
 * @port: The port of @qp
 *
 * Return: The counter (with ref-count increased) if found
 */
static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
						       u8 port)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter = NULL;
	struct ib_device *dev = qp->device;
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	unsigned long id = 0;

	port_counter = &dev->port_data[port].port_counter;
	rt = &dev->res[RDMA_RESTRACK_COUNTER];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		counter = container_of(res, struct rdma_counter, res);
		if ((counter->device != qp->device) || (counter->port != port))
			goto next;

		if (auto_mode_match(qp, counter, port_counter->mode.mask))
			break;
next:
		counter = NULL;
	}

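	/*
	 * Take the reference under the xarray lock; a counter whose kref
	 * has already dropped to zero is being released and must not be
	 * reused.
	 */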
	if (counter && !kref_get_unless_zero(&counter->kref))
		counter = NULL;

	xa_unlock(&rt->xa);
	return counter;
}

static void rdma_counter_res_add(struct rdma_counter *counter,
				 struct ib_qp *qp)
{
	rdma_restrack_parent_name(&counter->res, &qp->res);
	rdma_restrack_add(&counter->res);
}

static void counter_release(struct kref *kref)
{
	struct rdma_counter *counter;

	counter = container_of(kref, struct rdma_counter, kref);
	counter_history_stat_update(counter);
	counter->device->ops.counter_dealloc(counter);
	rdma_counter_free(counter);
}

/**
 * rdma_counter_bind_qp_auto() - Check and bind the QP to a counter based
 *   on the auto-mode rule
 * @qp: The QP to be bound
 * @port: The port of @qp
 */
int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
{
	struct rdma_port_counter *port_counter;
	struct ib_device *dev = qp->device;
	struct rdma_counter *counter;
	int ret;

	if (!qp->res.valid || rdma_is_kernel_res(&qp->res))
		return 0;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	port_counter = &dev->port_data[port].port_counter;
	if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO)
		return 0;

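	/* Reuse a matching counter if one exists, else allocate a new one. */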
	counter = rdma_get_counter_auto_mode(qp, port);
	if (counter) {
		ret = __rdma_counter_bind_qp(counter, qp);
		if (ret) {
			kref_put(&counter->kref, counter_release);
			return ret;
		}
	} else {
		counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_AUTO);
		if (!counter)
			return -ENOMEM;

		auto_mode_init_counter(counter, qp, port_counter->mode.mask);

		ret = __rdma_counter_bind_qp(counter, qp);
		if (ret) {
			rdma_counter_free(counter);
			return ret;
		}

		rdma_counter_res_add(counter, qp);
	}

	return 0;
}

/**
 * rdma_counter_unbind_qp() - Unbind a qp from a counter
 * @qp: The QP to be unbound
 * @force: When true, decrease the counter ref-count even if unbinding
 *   fails (e.g., on qp destroy)
 */
int rdma_counter_unbind_qp(struct ib_qp *qp, bool force)
{
	struct rdma_counter *counter = qp->counter;
	int ret;

	if (!counter)
		return -EINVAL;

	ret = __rdma_counter_unbind_qp(qp);
	if (ret && !force)
		return ret;

	kref_put(&counter->kref, counter_release);
	return 0;
}

int rdma_counter_query_stats(struct rdma_counter *counter)
{
	struct ib_device *dev = counter->device;
	int ret;

	if (!dev->ops.counter_update_stats)
		return -EINVAL;

	mutex_lock(&counter->lock);
	ret = dev->ops.counter_update_stats(counter);
	mutex_unlock(&counter->lock);

	return ret;
}

static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
					   u8 port, u32 index)
{
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	struct rdma_counter *counter;
	unsigned long id = 0;
	u64 sum = 0;

	rt = &dev->res[RDMA_RESTRACK_COUNTER];
	xa_lock(&rt->xa);
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_restrack_get(res))
			continue;

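		/*
		 * Drop the xarray lock while querying: the restrack
		 * reference keeps @res alive, and counter_update_stats()
		 * takes a mutex and may sleep.
		 */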
		xa_unlock(&rt->xa);

		counter = container_of(res, struct rdma_counter, res);
		if ((counter->device != dev) || (counter->port != port) ||
		    rdma_counter_query_stats(counter))
			goto next;

		sum += counter->stats->value[index];

next:
		xa_lock(&rt->xa);
		rdma_restrack_put(res);
	}

	xa_unlock(&rt->xa);
	return sum;
}

/**
 * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
 *   specific port, including the running ones and history data
 * @dev: The device
 * @port: The port to sum over
 * @index: The index into the stats value array
 */
u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
{
	struct rdma_port_counter *port_counter;
	u64 sum;

	port_counter = &dev->port_data[port].port_counter;
	if (!port_counter->hstats)
		return 0;

	sum = get_running_counters_hwstat_sum(dev, port, index);
	sum += port_counter->hstats->value[index];

	return sum;
}

static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
{
	struct rdma_restrack_entry *res = NULL;
	struct ib_qp *qp = NULL;

	res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, qp_num);
	if (IS_ERR(res))
		return NULL;

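	/* Binding counters to a raw packet QP requires CAP_NET_RAW. */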
	qp = container_of(res, struct ib_qp, res);
	if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		goto err;

	return qp;

err:
	rdma_restrack_put(res);
	return NULL;
}

static int rdma_counter_bind_qp_manual(struct rdma_counter *counter,
				       struct ib_qp *qp)
{
	if ((counter->device != qp->device) || (counter->port != qp->port))
		return -EINVAL;

	return __rdma_counter_bind_qp(counter, qp);
}

static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
						   u32 counter_id)
{
	struct rdma_restrack_entry *res;
	struct rdma_counter *counter;

	res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_COUNTER, counter_id);
	if (IS_ERR(res))
		return NULL;

	counter = container_of(res, struct rdma_counter, res);
	kref_get(&counter->kref);
	rdma_restrack_put(res);

	return counter;
}

/**
 * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
 * @dev: The device
 * @port: The port of the QP
 * @qp_num: The number of the QP to be bound
 * @counter_id: The id of the counter to bind to
 */
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
			  u32 qp_num, u32 counter_id)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter;
	struct ib_qp *qp;
	int ret;

	port_counter = &dev->port_data[port].port_counter;
	if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
		return -EINVAL;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	counter = rdma_get_counter_by_id(dev, counter_id);
	if (!counter) {
		ret = -ENOENT;
		goto err;
	}

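	/* A kernel QP and a user counter (or vice versa) must not be mixed. */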
	if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) {
		ret = -EINVAL;
		goto err_task;
	}

	ret = rdma_counter_bind_qp_manual(counter, qp);
	if (ret)
		goto err_task;

	rdma_restrack_put(&qp->res);
	return 0;

err_task:
	kref_put(&counter->kref, counter_release);
err:
	rdma_restrack_put(&qp->res);
	return ret;
}

/**
 * rdma_counter_bind_qpn_alloc() - Allocate a counter and bind QP @qp_num
 *   to it
 * @dev: The device
 * @port: The port of the QP
 * @qp_num: The number of the QP to be bound
 * @counter_id: The id of the new counter is returned here
 */
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
				u32 qp_num, u32 *counter_id)
{
	struct rdma_port_counter *port_counter;
	struct rdma_counter *counter;
	struct ib_qp *qp;
	int ret;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	port_counter = &dev->port_data[port].port_counter;
	if (!port_counter->hstats)
		return -EOPNOTSUPP;

	if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
		return -EINVAL;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
		ret = -EINVAL;
		goto err;
	}

	counter = rdma_counter_alloc(dev, port, RDMA_COUNTER_MODE_MANUAL);
	if (!counter) {
		ret = -ENOMEM;
		goto err;
	}

	ret = rdma_counter_bind_qp_manual(counter, qp);
	if (ret)
		goto err_bind;

	if (counter_id)
		*counter_id = counter->id;

	rdma_counter_res_add(counter, qp);

	rdma_restrack_put(&qp->res);
	return ret;

err_bind:
	rdma_counter_free(counter);
err:
	rdma_restrack_put(&qp->res);
	return ret;
}

/**
 * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
 * @dev: The device
 * @port: The port of the QP
 * @qp_num: The number of the QP to be unbound
 * @counter_id: The id of the counter it is currently bound to
 */
int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
			    u32 qp_num, u32 counter_id)
{
	struct rdma_port_counter *port_counter;
	struct ib_qp *qp;
	int ret;

	if (!rdma_is_port_valid(dev, port))
		return -EINVAL;

	qp = rdma_counter_get_qp(dev, qp_num);
	if (!qp)
		return -ENOENT;

	if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) {
		ret = -EINVAL;
		goto out;
	}

	port_counter = &dev->port_data[port].port_counter;
	if (!qp->counter || qp->counter->id != counter_id ||
	    port_counter->mode.mode != RDMA_COUNTER_MODE_MANUAL) {
		ret = -EINVAL;
		goto out;
	}

	ret = rdma_counter_unbind_qp(qp, false);

out:
	rdma_restrack_put(&qp->res);
	return ret;
}

int rdma_counter_get_mode(struct ib_device *dev, u8 port,
			  enum rdma_nl_counter_mode *mode,
			  enum rdma_nl_counter_mask *mask)
{
	struct rdma_port_counter *port_counter;

	port_counter = &dev->port_data[port].port_counter;
	*mode = port_counter->mode.mode;
	*mask = port_counter->mode.mask;

	return 0;
}

void rdma_counter_init(struct ib_device *dev)
{
	struct rdma_port_counter *port_counter;
	u32 port, i;

	if (!dev->port_data)
		return;

	rdma_for_each_port(dev, port) {
		port_counter = &dev->port_data[port].port_counter;
		port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
		mutex_init(&port_counter->lock);

		if (!dev->ops.alloc_hw_stats)
			continue;

		port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
		if (!port_counter->hstats)
			goto fail;
	}

	return;

fail:
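	/* Unwind every port initialized before the allocation failure. */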
	for (i = port; i >= rdma_start_port(dev); i--) {
		port_counter = &dev->port_data[i].port_counter;
		kfree(port_counter->hstats);
		port_counter->hstats = NULL;
		mutex_destroy(&port_counter->lock);
	}
}

void rdma_counter_release(struct ib_device *dev)
{
	struct rdma_port_counter *port_counter;
	u32 port;

	rdma_for_each_port(dev, port) {
		port_counter = &dev->port_data[port].port_counter;
		kfree(port_counter->hstats);
		mutex_destroy(&port_counter->lock);
	}
}