/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_NUM_SPARE_EQE = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 tavor_pd;	/* reserved for Arbel */
	u8     reserved1[3];
	u8     intr;
	__be32 arbel_pd;	/* lost_count for Tavor */
	__be32 lkey;
	u32    reserved2[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32    reserved3[4];
} __packed;

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
	MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			__be32 cqn;
		} __packed comp;
		struct {
			u16    reserved1;
			__be16 token;
			u32    reserved2;
			u8     reserved3[3];
			u8     status;
			__be64 out_param;
		} __packed cmd;
		struct {
			__be32 qpn;
		} __packed qp;
		struct {
			__be32 srqn;
		} __packed srq;
		struct {
			__be32 cqn;
			u32    reserved1;
			u8     reserved2[3];
			u8     syndrome;
		} __packed cq_err;
		struct {
			u32    reserved1[2];
			__be32 port;
		} __packed port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __packed;

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel((__force u32) cpu_to_be32(ci),
		     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

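/*
 * Disarming a CQ after it generates a completion event is only
 * required on Tavor; on mem-free (Arbel) HCAs no doorbell is needed,
 * so this is a no-op there.
 */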
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

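/*
 * EQ entries live in a list of pages: mask the entry index by the
 * (power-of-two) queue size, then split the byte offset into a page
 * index and an offset within that page.
 */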
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

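/*
 * Return the EQE at the consumer index if it is owned by software,
 * or NULL if the HCA still owns it (i.e. there is nothing to do).
 */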
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

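/*
 * Drain all software-owned EQEs from the given EQ: dispatch each
 * event to the CQ/QP/SRQ/command handlers, hand the entry back to
 * hardware, and update the consumer index often enough that the HCA
 * never sees the queue as full.  Returns nonzero if any EQEs were
 * processed; the caller does the final consumer index update.
 */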
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_completion(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_LAST_WQE_REACHED);
			break;

		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
					IB_EVENT_SRQ_LIMIT_REACHED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				       IB_EVENT_CQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MTHCA_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on caller to set consumer index so that we don't have
	 * to test hca_type in our interrupt handling fast path.
	 */
	return eqes_found;
}

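/*
 * INTx handler for Tavor: ack the interrupt, read the ECR to see
 * which EQs have pending events, clear those ECR bits, and service
 * each EQ that fired before rearming it.
 */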
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (!ecr)
		return IRQ_NONE;

	writel(ecr, dev->eq_regs.tavor.ecr_base +
	       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (ecr & dev->eq_table.eq[i].eqn_mask) {
			if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
		}

	return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

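/*
 * INTx handler for mem-free (Arbel) HCAs: there is no ECR, so poll
 * every EQ, update the consumer index of any that had work, and
 * rearm them all with a single doorbell write.
 */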
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

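/*
 * Allocate one EQ: round the requested size up to a power of two,
 * allocate the queue as page-sized coherent DMA buffers, register a
 * memory region covering them, fill in the EQ context and hand the
 * EQ over to the hardware with SW2HW_EQ.
 */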
static int mthca_create_eq(struct mthca_dev *dev,
			   int nent,
			   u8 intr,
			   struct mthca_eq *eq)
{
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
				      GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		dma_unmap_addr_set(&eq->page_list[i], mapping, t);

		clear_page(eq->page_list[i].buf);
	}

	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
					MTHCA_EQ_OWNER_HW    |
					MTHCA_EQ_STATE_ARMED |
					MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr = intr;
	eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  dma_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

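/*
 * Take the EQ back from hardware with HW2SW_EQ, then free the memory
 * region and the DMA pages backing the queue.
 */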
static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    dma_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq) {
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
			dev->eq_table.eq[i].have_irq = 0;
		}
}

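/*
 * Map a range of device registers given as an offset from the start
 * of BAR 0.
 */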
static int mthca_map_reg(struct mthca_dev *dev,
			 unsigned long offset, unsigned long size,
			 void __iomem **map)
{
	phys_addr_t base = pci_resource_start(dev->pdev, 0);

	*map = ioremap(base + offset, size);
	if (!*map)
		return -ENOMEM;

	return 0;
}

static int mthca_map_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			iounmap(dev->eq_regs.arbel.eq_arm);
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			iounmap(dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
		iounmap(dev->eq_regs.arbel.eq_arm);
		iounmap(dev->clr_base);
	} else {
		iounmap(dev->eq_regs.tavor.ecr_base);
		iounmap(dev->clr_base);
	}
}

int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

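/*
 * Set up the EQ table: allocate the EQN bitmap, map the doorbell and
 * interrupt registers, create the completion, async and command EQs,
 * request either one MSI-X vector per EQ or a single shared INTx
 * line, map the async and command event classes onto their EQs, and
 * arm all three EQs.
 */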
int mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			snprintf(dev->eq_table.eq[i].irq_name,
				 IB_DEVICE_NAME_MAX,
				 "%s@pci:%s", eq_name[i],
				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, dev->eq_table.eq[i].irq_name,
					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}