xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/cavium/liquidio/response_manager.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"

static void oct_poll_req_completion(struct work_struct *work);
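
/* Set up the per-device response lists (list head, lock and pending
 * request counter for each), the cmd_resp_wqlock, and the "dma-comp"
 * workqueue whose delayed work polls for soft-command completions.
 * Returns 0 on success or -ENOMEM if the workqueue cannot be created.
 */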
int octeon_setup_response_list(struct octeon_device *oct)
{
	int i, ret = 0;
	struct cavium_wq *cwq;

	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
		INIT_LIST_HEAD(&oct->response_list[i].head);
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}
	spin_lock_init(&oct->cmd_resp_wqlock);

	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
	if (!oct->dma_comp_wq.wq) {
		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
		return -ENOMEM;
	}

	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	oct->cmd_resp_state = OCT_DRV_ONLINE;

	return ret;
}
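
/* Stop the completion-poll work and tear down the "dma-comp" workqueue
 * created by octeon_setup_response_list().
 */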
void octeon_delete_response_list(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
	destroy_workqueue(oct->dma_comp_wq.wq);
}
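
/* Walk the ordered soft-command list from the head, completing commands
 * whose response has arrived from firmware and expiring those that have
 * timed out (or, when force_quit is set, any command still awaiting a
 * response). Because the list is ordered, processing stops at the first
 * command that is still pending, and at most MAX_ORD_REQS_TO_PROCESS
 * commands are handled per call. Returns 1 when the list is empty,
 * 0 otherwise.
 */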
int lio_process_ordered_list(struct octeon_device *octeon_dev,
			     u32 force_quit)
{
	struct octeon_response_list *ordered_sc_list;
	struct octeon_soft_command *sc;
	int request_complete = 0;
	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
	u32 status;
	u64 status64;

	octeon_free_sc_done_list(octeon_dev);

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

	do {
		spin_lock_bh(&ordered_sc_list->lock);

		if (list_empty(&ordered_sc_list->head)) {
			spin_unlock_bh(&ordered_sc_list->lock);
			return 1;
		}

		sc = list_first_entry(&ordered_sc_list->head,
				      struct octeon_soft_command, node);

		status = OCTEON_REQUEST_PENDING;

		/* Check whether the Octeon device has finished DMA'ing a
		 * response to the location that rptr points to.
		 */
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64 bits have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 16-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				octeon_swap_8B_data(&status64, 1);
				if ((status64 & 0xff) != 0xff) {
					/* retrieve 16-bit firmware status */
					status = (u32)(status64 & 0xffffULL);
					if (status) {
						status =
						  FIRMWARE_STATUS_CODE(status);
					} else {
						/* i.e. no error */
						status = OCTEON_REQUEST_DONE;
					}
				}
			}
		} else if (unlikely(force_quit) || (sc->expiry_time &&
			time_after(jiffies, (unsigned long)sc->expiry_time))) {
			struct octeon_instr_irh *irh =
				(struct octeon_instr_irh *)&sc->cmd.cmd3.irh;

			dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
			dev_err(&octeon_dev->pci_dev->dev,
				"cmd %x/%x/%llx/%llx failed, ",
				irh->opcode, irh->subcode,
				sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
			dev_err(&octeon_dev->pci_dev->dev,
				"timeout (%ld, %ld)\n",
				(long)jiffies, (long)sc->expiry_time);
			status = OCTEON_REQUEST_TIMEOUT;
		}

		if (status != OCTEON_REQUEST_PENDING) {
			sc->sc_status = status;

			/* A response has arrived or the request has timed
			 * out; remove the node from the ordered list.
			 */
			list_del(&sc->node);
			atomic_dec(&octeon_dev->response_list
				   [OCTEON_ORDERED_SC_LIST].
				   pending_req_count);

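			/* A completed (or expired) command is dispatched in
			 * one of two ways: commands without a callback are
			 * moved to the DONE list and, if the caller is still
			 * waiting, woken via complete(); commands with a
			 * callback have it invoked outside the list lock,
			 * with timed-out ones first parked on the ZOMBIE
			 * list.
			 */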
			if (!sc->callback) {
				atomic_inc(&octeon_dev->response_list
					   [OCTEON_DONE_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node,
					      &octeon_dev->response_list
					      [OCTEON_DONE_SC_LIST].head);

				if (unlikely(READ_ONCE(sc->caller_is_done))) {
					/* caller does not wait for response
					 * from firmware
					 */
					if (status != OCTEON_REQUEST_DONE) {
						struct octeon_instr_irh *irh;

						irh =
						    (struct octeon_instr_irh *)
						    &sc->cmd.cmd3.irh;
						dev_dbg
						    (&octeon_dev->pci_dev->dev,
						    "%s: sc failed: opcode=%x, ",
						    __func__, irh->opcode);
						dev_dbg
						    (&octeon_dev->pci_dev->dev,
						    "subcode=%x, ossp[0]=%llx, ",
						    irh->subcode,
						    sc->cmd.cmd3.ossp[0]);
						dev_dbg
						    (&octeon_dev->pci_dev->dev,
						    "ossp[1]=%llx, status=%d\n",
						    sc->cmd.cmd3.ossp[1],
						    status);
					}
				} else {
					complete(&sc->complete);
				}

				spin_unlock_bh(&ordered_sc_list->lock);
			} else {
				/* sc with callback function */
				if (status == OCTEON_REQUEST_TIMEOUT) {
					atomic_inc(&octeon_dev->response_list
						   [OCTEON_ZOMBIE_SC_LIST].
						   pending_req_count);
					list_add_tail(&sc->node,
						      &octeon_dev->response_list
						      [OCTEON_ZOMBIE_SC_LIST].
						      head);
				}

				spin_unlock_bh(&ordered_sc_list->lock);

				sc->callback(octeon_dev, status,
					     sc->callback_arg);
				/* sc is freed by caller */
			}

			request_complete++;

		} else {
			/* no response yet */
			request_complete = 0;
			spin_unlock_bh(&ordered_sc_list->lock);
		}

		/* If we hit the maximum number of ordered requests to process
		 * in one pass, quit and let the next run of the poll work
		 * handle the remaining requests; without this upper limit the
		 * function could monopolize the CPU.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}
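
/* Delayed-work handler that drains the ordered soft-command list and
 * re-arms itself (after roughly 1 ms) for as long as ordered requests
 * remain pending.
 */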
static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);

	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
}
235