/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/**
 * This application uses this pointer to access the global queue
 * state. It points to a bootmem named block.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/**
 * Initialize the global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					     128,
					     alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
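
/*
 * Note on the allocate-or-find pattern above: whichever application
 * runs first creates the "cvmx_cmd_queues" named block, and later
 * applications attach to the same block. A minimal sketch of the
 * attach side, using only calls already present in this file:
 *
 *	struct cvmx_bootmem_named_block_desc *desc =
 *		cvmx_bootmem_find_named_block("cvmx_cmd_queues");
 *	if (desc)
 *		__cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(desc->base_addr);
 */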

/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id: Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool: FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "max_depth (%d).\n",
				     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool (%u).\n",
				     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool size (%u).\n",
				     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now_serving field, so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
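
/*
 * Example (illustrative sketch, not compiled code): bringing up the
 * command queue for PKO output queue 0. CVMX_CMD_QUEUE_PKO() and the
 * FPA pool constants are assumed to come from cvmx-cmd-queue.h and
 * cvmx-config.h; check the SDK configuration for the actual values.
 *
 *	cvmx_cmd_queue_result_t rc;
 *
 *	rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0),
 *				       0, // must be 0 unless
 *					  // CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
 *				       CVMX_FPA_OUTPUT_BUFFER_POOL,
 *				       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
 *	if (rc != CVMX_CMD_QUEUE_SUCCESS && rc != CVMX_CMD_QUEUE_ALREADY_SETUP)
 *		return rc;
 */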

/**
 * Shutdown a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}
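
/*
 * Example (sketch): tearing down PKO output queue 0 once the attached
 * hardware unit has been stopped. CVMX_CMD_QUEUE_PKO() is assumed from
 * cvmx-cmd-queue.h. The queue must have drained first, or the call
 * fails with CVMX_CMD_QUEUE_FULL:
 *
 *	if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0)) == 0)
 *		return cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0));
 */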

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
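
/*
 * Example (sketch): because the return type is int, a negative result
 * is one of the cvmx_cmd_queue_result_t error codes rather than a
 * length, so callers should check the sign first. CVMX_CMD_QUEUE_PKO()
 * is assumed from cvmx-cmd-queue.h:
 *
 *	int pending = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0));
 *	if (pending < 0)
 *		return pending; // e.g. CVMX_CMD_QUEUE_INVALID_PARAM
 */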

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr && qptr->base_ptr_div128)
		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
	else
		return NULL;
}