// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include "cxgbit.h"

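/*
 * Fill a single page-pod (ppod): copy the ppod header from ttinfo and
 * populate the address slots with DMA page addresses walked from the
 * scatterlist, advancing *sg_pp and *sg_off as pages are consumed. The
 * fifth slot duplicates the first page of the next ppod, so the
 * scatterlist cursor is intentionally left pointing at that page.
 */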
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}

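/*
 * Allocate an skb carrying a ULP_TX memory-write work request whose
 * immediate data area will hold "npods" page-pods, addressed at ppod
 * index "idx" within the adapter's iSCSI page-pod region.
 */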
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
			      FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			 ULP_MEMIO_ORDER_V(0) |
			 T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}

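/*
 * Build one immediate-data ppod write covering "npods" page-pods, fill
 * the ppods from the scatterlist and queue the resulting skb on
 * csk->ppodq for later transmission.
 */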
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
			unsigned int npods, struct scatterlist **sg_pp,
			unsigned int *sg_off)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int i;

	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->data;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	__skb_queue_tail(&csk->ppodq, skb);

	return 0;
}

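/*
 * Program all page-pods reserved for this task tag, splitting the work
 * into requests of at most ULPMEM_IDATA_MAX_NPPODS ppods each.
 */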
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
		   struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;
	int ret = 0;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
		if (ret < 0)
			break;
	}

	return ret;
}

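/*
 * DDP needs a scatterlist the hardware can treat as a flat list of
 * pages: offsets must be 4-byte aligned, only the first entry may start
 * at a non-zero offset, and every entry except the last must end on a
 * page boundary.
 */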
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
				unsigned int nents)
{
	unsigned int last_sgidx = nents - 1;
	unsigned int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
			return -EINVAL;
		}
	}

	return 0;
}

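/*
 * Set up DDP for a WRITE command: validate the scatterlist, reserve
 * page-pods and a DDP tag from the ppod manager, DMA-map the buffer and
 * queue the ppod write requests. On failure any reserved ppods are
 * released and an error is returned so the caller falls back to
 * non-DDP data transfer.
 */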
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			 __func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}

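/*
 * Called while building an R2T for a WRITE command: try to set up DDP
 * for the command's data buffer and advertise the resulting DDP tag as
 * the target transfer tag, so the adapter can place the incoming
 * Data-Out payload directly into the destination pages.
 */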
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}

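/*
 * Release per-command DDP state: either drop the page reference taken
 * for the immediate-data passthrough case, or unmap the scatterlist and
 * release the reserved page-pods. If the WRITE did not complete, abort
 * the TCP connection first so no DDP can happen after the command is
 * freed.
 */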
void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
			put_page(sg_page(&ccmd->sg));
		} else {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);
			struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			if (unlikely(ttinfo->sgl)) {
				dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
					     ttinfo->nents, DMA_FROM_DEVICE);
				ttinfo->nents = 0;
				ttinfo->sgl = NULL;
			}
			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
		}
		ccmd->release = false;
	}
}

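/*
 * Per-adapter DDP setup: verify that an iSCSI memory region was
 * provisioned, build the tag format from the firmware-provided
 * page-size orders and tag mask, and initialize the page-pod manager.
 * DDP is enabled on the device only if a usable default page-size index
 * and at least 1024 ppods are available.
 */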
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					& 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     lldi->vr->iscsi.size, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2,
			     lldi->vr->ppod_edram.start,
			     lldi->vr->ppod_edram.size);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}