/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

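/* Send a scheduling class add/delete request to firmware: a thin wrapper
 * that forwards the per-class parameters in @p via t4_sched_params().
 */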
static int t4_sched_class_fw_cmd(struct port_info *pi,
                                 struct ch_sched_params *p,
                                 enum sched_fw_ops op)
{
        struct adapter *adap = pi->adapter;
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        int err = 0;

        e = &s->tab[p->u.params.class];
        switch (op) {
        case SCHED_FW_OP_ADD:
        case SCHED_FW_OP_DEL:
                err = t4_sched_params(adap, p->type,
                                      p->u.params.level, p->u.params.mode,
                                      p->u.params.rateunit,
                                      p->u.params.ratemode,
                                      p->u.params.channel, e->idx,
                                      p->u.params.minrate, p->u.params.maxrate,
                                      p->u.params.weight, p->u.params.pktsize,
                                      p->u.params.burstsize);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

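/* Program a bind/unbind in hardware. Queues are (un)bound by rewriting
 * their DMAQ scheduling-class parameter through a FW_PARAMS command;
 * FLOWC entities are (un)bound by sending a FLOWC work request on the
 * ethofld path. Unbinding points the entity at FW_SCHED_CLS_NONE.
 */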
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
                                   enum sched_bind_type type, bool bind)
{
        struct adapter *adap = pi->adapter;
        u32 fw_mnem, fw_class, fw_param;
        unsigned int pf = adap->pf;
        unsigned int vf = 0;
        int err = 0;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe;

                qe = (struct sched_queue_entry *)arg;

                /* Create a template for the FW_PARAMS_CMD mnemonic and
                 * value (TX Scheduling Class in this case).
                 */
                fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                           FW_PARAMS_PARAM_X_V(
                                   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
                fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
                fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

                pf = adap->pf;
                vf = 0;

                err = t4_set_params(adap, adap->mbox, pf, vf, 1,
                                    &fw_param, &fw_class);
                break;
        }
        case SCHED_FLOWC: {
                struct sched_flowc_entry *fe;

                fe = (struct sched_flowc_entry *)arg;

                fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
                err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
                                               fe->param.tid, fw_class);
                break;
        }
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

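/* Scan the scheduling table for the entry matching @val: the queue
 * context id for SCHED_QUEUE entries, or the tid for SCHED_FLOWC
 * entries. Returns the matching entry, or NULL if none is bound.
 */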
static void *t4_sched_entry_lookup(struct port_info *pi,
                                   enum sched_bind_type type,
                                   const u32 val)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e, *end;
        void *found = NULL;

        /* Look for an entry with matching @val */
        end = &s->tab[s->sched_size];
        for (e = &s->tab[0]; e != end; ++e) {
                if (e->state == SCHED_STATE_UNUSED ||
                    e->bind_type != type)
                        continue;

                switch (type) {
                case SCHED_QUEUE: {
                        struct sched_queue_entry *qe;

                        list_for_each_entry(qe, &e->entry_list, list) {
                                if (qe->cntxt_id == val) {
                                        found = qe;
                                        break;
                                }
                        }
                        break;
                }
                case SCHED_FLOWC: {
                        struct sched_flowc_entry *fe;

                        list_for_each_entry(fe, &e->entry_list, list) {
                                if (fe->param.tid == val) {
                                        found = fe;
                                        break;
                                }
                        }
                        break;
                }
                default:
                        return NULL;
                }

                if (found)
                        break;
        }

        return found;
}

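/**
 * cxgb4_sched_queue_lookup - find the class a Tx queue is bound to
 * @dev: net_device pointer
 * @p: scheduling queue entity; only @p->queue is used
 *
 * Returns the scheduling class the queue is currently bound to, or NULL
 * if the queue index is out of range or the queue is not bound.
 */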
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
                                             struct ch_sched_queue *p)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_queue_entry *qe = NULL;
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return NULL;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
        return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}

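/* Unbind a Tx queue from whatever class it is currently bound to. When
 * the last entity leaves a class, the class itself is freed.
 */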
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct sched_queue_entry *qe = NULL;
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq;
        struct sched_class *e;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

        /* Find the existing entry that the queue is bound to */
        qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
        if (qe) {
                err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
                                              false);
                if (err)
                        return err;

                e = &pi->sched_tbl->tab[qe->param.class];
                list_del(&qe->list);
                kvfree(qe);
                if (atomic_dec_and_test(&e->refcnt))
                        cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
        }
        return err;
}

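/* Bind a Tx queue to the class selected by @p->class, first removing any
 * binding the queue already has.
 */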
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_queue_entry *qe = NULL;
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq;
        struct sched_class *e;
        unsigned int qid;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
        if (!qe)
                return -ENOMEM;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qid = txq->q.cntxt_id;

        /* Unbind queue from any existing class */
        err = t4_sched_queue_unbind(pi, p);
        if (err)
                goto out_err;

        /* Bind queue to specified class */
        qe->cntxt_id = qid;
        memcpy(&qe->param, p, sizeof(qe->param));

        e = &s->tab[qe->param.class];
        err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
        if (err)
                goto out_err;

        list_add_tail(&qe->list, &e->entry_list);
        e->bind_type = SCHED_QUEUE;
        atomic_inc(&e->refcnt);
        return err;

out_err:
        kvfree(qe);
        return err;
}

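/* Unbind an ethofld flowc entity, identified by @p->tid, from its class,
 * releasing the class reference like the queue variant above.
 */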
static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
        struct sched_flowc_entry *fe = NULL;
        struct adapter *adap = pi->adapter;
        struct sched_class *e;
        int err = 0;

        if (p->tid < 0 || p->tid >= adap->tids.neotids)
                return -ERANGE;

        /* Find the existing entry that the flowc is bound to */
        fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
        if (fe) {
                err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
                                              false);
                if (err)
                        return err;

                e = &pi->sched_tbl->tab[fe->param.class];
                list_del(&fe->list);
                kvfree(fe);
                if (atomic_dec_and_test(&e->refcnt))
                        cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
        }
        return err;
}

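/* Bind an ethofld flowc entity to the class selected by @p->class, first
 * removing any binding the entity already has.
 */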
static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_flowc_entry *fe = NULL;
        struct adapter *adap = pi->adapter;
        struct sched_class *e;
        int err = 0;

        if (p->tid < 0 || p->tid >= adap->tids.neotids)
                return -ERANGE;

        fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
        if (!fe)
                return -ENOMEM;

        /* Unbind flowc from any existing class */
        err = t4_sched_flowc_unbind(pi, p);
        if (err)
                goto out_err;

        /* Bind flowc to specified class */
        memcpy(&fe->param, p, sizeof(fe->param));

        e = &s->tab[fe->param.class];
        err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
        if (err)
                goto out_err;

        list_add_tail(&fe->list, &e->entry_list);
        e->bind_type = SCHED_FLOWC;
        atomic_inc(&e->refcnt);
        return err;

out_err:
        kvfree(fe);
        return err;
}

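/* Detach every entity still attached to class @e, so firmware state is
 * released before the class itself is torn down.
 */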
static void t4_sched_class_unbind_all(struct port_info *pi,
                                      struct sched_class *e,
                                      enum sched_bind_type type)
{
        if (!e)
                return;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe;

                list_for_each_entry(qe, &e->entry_list, list)
                        t4_sched_queue_unbind(pi, &qe->param);
                break;
        }
        case SCHED_FLOWC: {
                struct sched_flowc_entry *fe;

                list_for_each_entry(fe, &e->entry_list, list)
                        t4_sched_flowc_unbind(pi, &fe->param);
                break;
        }
        default:
                break;
        }
}

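/* Dispatch a bind or unbind request to the handler for the entity type. */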
static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
                                         enum sched_bind_type type, bool bind)
{
        int err = 0;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                if (bind)
                        err = t4_sched_queue_bind(pi, qe);
                else
                        err = t4_sched_queue_unbind(pi, qe);
                break;
        }
        case SCHED_FLOWC: {
                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

                if (bind)
                        err = t4_sched_flowc_bind(pi, fe);
                else
                        err = t4_sched_flowc_unbind(pi, fe);
                break;
        }
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class. If the entity
 * is already bound to another class, it is unbound from that class first
 * and then bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        case SCHED_FLOWC: {
                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

                class_id = fe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        if (class_id == SCHED_CLS_NONE)
                return -ENOTSUPP;

        return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

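/* Usage sketch (illustrative only; the queue and class indices below are
 * assumptions, not values taken from this driver). Binding the second
 * Tx queue of a port to class 0 would look like:
 *
 *	struct ch_sched_queue qe = {
 *		.queue = 1,
 *		.class = 0,
 *	};
 *	int err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 */
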
/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
                             enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        case SCHED_FLOWC: {
                struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

                class_id = fe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
                                                 const struct ch_sched_params *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *found = NULL;
        struct sched_class *e, *end;

        if (!p) {
                /* Get any available unused class */
                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED) {
                                found = e;
                                break;
                        }
                }
        } else {
                /* Look for a class with matching scheduling parameters */
                struct ch_sched_params info;
                struct ch_sched_params tp;

                memcpy(&tp, p, sizeof(tp));
                /* Don't try to match class parameter */
                tp.u.params.class = SCHED_CLS_NONE;

                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED)
                                continue;

                        memcpy(&info, &e->info, sizeof(info));
                        /* Don't try to match class parameter */
                        info.u.params.class = SCHED_CLS_NONE;

                        if ((info.type == tp.type) &&
                            (!memcmp(&info.u.params, &tp.u.params,
                                     sizeof(info.u.params)))) {
                                found = e;
                                break;
                        }
                }
        }

        return found;
}

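/* Claim a class for @p. FLOWC mode parameters may match an already
 * active class, which is then shared; otherwise an unused class is
 * picked, programmed in firmware via SCHED_FW_OP_ADD and marked active.
 */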
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                                                struct ch_sched_params *p)
{
        struct sched_class *e = NULL;
        u8 class_id;
        int err;

        if (!p)
                return NULL;

        class_id = p->u.params.class;

        /* Only accept search for existing class with matching params
         * or allocation of new class with specified params
         */
        if (class_id != SCHED_CLS_NONE)
                return NULL;

        /* See if there's an existing class with same requested sched
         * params. Classes can only be shared among FLOWC types. For
         * other types, always request a new class.
         */
        if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
                e = t4_sched_class_lookup(pi, p);

        if (!e) {
                struct ch_sched_params np;

                /* Fetch any available unused class */
                e = t4_sched_class_lookup(pi, NULL);
                if (!e)
                        return NULL;

                memcpy(&np, p, sizeof(np));
                np.u.params.class = e->idx;
                /* New class */
                err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
                if (err)
                        return NULL;
                memcpy(&e->info, &np, sizeof(e->info));
                atomic_set(&e->refcnt, 0);
                e->state = SCHED_STATE_ACTIVE;
        }

        return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. If a scheduling class
 * with matching @p is found (possible only for FLOWC mode classes), the
 * matching class is returned; otherwise any available unused class is
 * claimed and configured with @p. @p must not be NULL and must request
 * SCHED_CLS_NONE as the class id.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
                                            struct ch_sched_params *p)
{
        struct port_info *pi = netdev2pinfo(dev);
        u8 class_id;

        if (!can_sched(dev))
                return NULL;

        class_id = p->u.params.class;
        if (!valid_class_id(dev, class_id))
                return NULL;

        return t4_sched_class_alloc(pi, p);
}

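/* Usage sketch (illustrative only; the rate value is an assumption and
 * most parameters are omitted for brevity). A caller lets the driver
 * pick a free class by requesting SCHED_CLS_NONE:
 *
 *	struct ch_sched_params p = { };
 *	struct sched_class *e;
 *
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.class = SCHED_CLS_NONE;
 *	p.u.params.maxrate = 100000;
 *	e = cxgb4_sched_class_alloc(dev, &p);
 *	if (!e)
 *		return -ENOMEM;
 */
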
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_table *s = pi->sched_tbl;
        struct ch_sched_params p;
        struct sched_class *e;
        u32 speed;
        int ret;

        e = &s->tab[classid];
        if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
                /* Port based rate limiting needs explicit reset back
                 * to max rate. But, we'll do explicit reset for all
                 * types, instead of just port based type, to be on
                 * the safer side.
                 */
                memcpy(&p, &e->info, sizeof(p));
                /* Always reset mode to 0. Otherwise, FLOWC mode will
                 * still be enabled even after resetting the traffic
                 * class.
                 */
                p.u.params.mode = 0;
                p.u.params.minrate = 0;
                p.u.params.pktsize = 0;

                ret = t4_get_link_params(pi, NULL, &speed, NULL);
                if (!ret)
                        p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
                else
                        p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

                t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

                e->state = SCHED_STATE_UNUSED;
                memset(&e->info, 0, sizeof(e->info));
        }
}

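/* Fully tear down a class: detach all bound entities, then release the
 * class itself.
 */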
static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
        struct port_info *pi = netdev2pinfo(dev);

        t4_sched_class_unbind_all(pi, e, e->bind_type);
        cxgb4_sched_class_free(dev, e->idx);
}

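/* Allocate and initialize a scheduling table with @sched_size classes,
 * all initially unused.
 */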
struct sched_table *t4_init_sched(unsigned int sched_size)
{
        struct sched_table *s;
        unsigned int i;

        s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
        if (!s)
                return NULL;

        s->sched_size = sched_size;

        for (i = 0; i < s->sched_size; i++) {
                memset(&s->tab[i], 0, sizeof(struct sched_class));
                s->tab[i].idx = i;
                s->tab[i].state = SCHED_STATE_UNUSED;
                INIT_LIST_HEAD(&s->tab[i].entry_list);
                atomic_set(&s->tab[i].refcnt, 0);
        }
        return s;
}

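/* Free the scheduling tables of all ports, tearing down any classes that
 * are still active.
 */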
void t4_cleanup_sched(struct adapter *adap)
{
        struct sched_table *s;
        unsigned int j, i;

        for_each_port(adap, j) {
                struct port_info *pi = netdev2pinfo(adap->port[j]);

                s = pi->sched_tbl;
                if (!s)
                        continue;

                for (i = 0; i < s->sched_size; i++) {
                        struct sched_class *e;

                        e = &s->tab[i];
                        if (e->state == SCHED_STATE_ACTIVE)
                                t4_sched_class_free(adap->port[j], e);
                }
                kvfree(s);
        }
}