// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: cregs.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC	10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

struct creg_cmd {
	struct list_head list;
	creg_cmd_cb cb;
	void *cb_private;
	unsigned int op;
	unsigned int addr;
	int cnt8;
	void *buf;
	unsigned int stream;
	unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;


/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness!!! Aborting...
#endif

static int copy_to_creg_data(struct rsxx_cardinfo *card,
			      int cnt8,
			      void *buf,
			      unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}

	return 0;
}


static int copy_from_creg_data(struct rsxx_cardinfo *card,
				int cnt8,
				void *buf,
				unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}

	return 0;
}

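/*
 * Program a single creg command into the hardware: stage the target address
 * and byte count, copy any write payload into the CREG_DATA window, then
 * start the command by writing the opcode (which carries the valid bit) to
 * CREG_CMD. Does nothing while the card is in EEH recovery.
 */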
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	int st;

	if (unlikely(card->eeh_state))
		return;

	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf) {
			st = copy_to_creg_data(card, cmd->cnt8,
					       cmd->buf, cmd->stream);
			if (st)
				return;
		}
	}

	if (unlikely(card->eeh_state))
		return;

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}

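/*
 * Issue the next queued command if the creg interface is idle.
 * Must be called with creg_ctrl.lock held.
 */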
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise
	 * we could race: a timer that was not canceled could expire after the
	 * new command was pushed but before it was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
				jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}

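/*
 * Allocate a creg command, append it to the queue, and kick the queue.
 * Returns -EINVAL if the card is halted or the transfer is too large,
 * -EAGAIN while the creg interface is being reset, and -ENOMEM if the
 * command cannot be allocated. The callback (if any) runs on completion,
 * timeout, or cancellation.
 */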
static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op		= op;
	cmd->addr	= addr;
	cmd->cnt8	= cnt8;
	cmd->buf	= buf;
	cmd->stream	= stream;
	cmd->cb		= callback;
	cmd->cb_private = cb_private;
	cmd->status	= 0;

	spin_lock_bh(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);

	return 0;
}

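/*
 * Timer callback: the active command did not complete within
 * CREG_TIMEOUT_MSEC. Pop it, report -ETIMEDOUT through its callback,
 * and move on to the next queued command.
 */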
static void creg_cmd_timed_out(struct timer_list *t)
{
	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
	struct creg_cmd *cmd;

	spin_lock(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}

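/*
 * Completion work item, run when a creg interrupt reports that the active
 * command has finished. Cancels the timeout timer, checks the hardware
 * status, copies read data back to the caller's buffer, invokes the
 * command's callback, and kicks the next queued command.
 */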
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock_bh(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

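/*
 * Recover the creg interface after a failure: mask the creg and event
 * interrupts, cancel every queued command and the active command with
 * -ECANCELED, then unmask the interrupts. If another reset is already in
 * progress this returns immediately.
 */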
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		"Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}

/* Used for synchronous accesses */
struct creg_completion {
	struct completion	*cmd_done;
	int			st;
	u32			creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}

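/*
 * Issue a single creg command synchronously: queue it with a completion
 * callback and wait for it to finish. The wait uses a generous timeout so
 * that, should the per-command timer itself fail, the creg interface is
 * reset rather than blocking forever.
 */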
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. The additional
	 * 20 seconds is used to guarantee that each creg request has time to
	 * complete.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			"cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		/*
		 * This read is needed to verify that no severe error (e.g. an
		 * EEH event) has occurred. iowrite32 will not detect EEH
		 * errors, so we must recover here if such an error caused the
		 * failure. This is a dummy read.
		 */
		ioread32(card->regmap + SCRATCH);

		dev_warn(CARD_TO_DEV(card),
			"creg command failed(%d x%08x)\n",
			completion.st, addr);
		return completion.st;
	}

	return 0;
}

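/*
 * Break an arbitrarily sized transfer into MAX_CREG_DATA8-sized chunks and
 * issue each chunk synchronously until the whole range has been transferred.
 */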
static int issue_creg_rw(struct rsxx_cardinfo *card,
			 u32 addr,
			 unsigned int size8,
			 void *data,
			 int stream,
			 int read)
{
	unsigned int hw_stat;
	unsigned int xfer;
	unsigned int op;
	int st;

	op = read ? CREG_OP_READ : CREG_OP_WRITE;

	do {
		xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

		st = __issue_creg_rw(card, op, addr, xfer,
				     data, stream, &hw_stat);
		if (st)
			return st;

		data   = (char *)data + xfer;
		addr  += xfer;
		size8 -= xfer;
	} while (size8);

	return 0;
}

/* ---------------------------- Public API ---------------------------------- */
int rsxx_creg_write(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
		       u32 addr,
		       unsigned int size8,
		       void *data,
		       int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
				  sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
				sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			     unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
				  sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
				   u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
				  sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
				   sizeof(cmd), &cmd, 0);
}


/*----------------- HW Log Functions -------------------*/
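/*
 * Forward one message from the card's hardware log to the kernel log,
 * mapping the "<#>" prefix on the message to the corresponding dev_*()
 * log level.
 */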
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level.
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}

/*
 * The substrncpy function copies at most count bytes from src to dest,
 * stopping after it copies a terminating '\0'. It returns the number of
 * bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}


static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}
	}

	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}

/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
			struct rsxx_reg_access __user *ucmd,
			int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}

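/*
 * EEH recovery helper: pull back the command that was in flight when the
 * error occurred and put it at the head of the queue so it is re-issued
 * once the queue is kicked again after recovery.
 */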
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;

	if (cmd) {
		del_timer_sync(&card->creg_ctrl.cmd_timer);

		spin_lock_bh(&card->creg_ctrl.lock);
		list_add(&cmd->list, &card->creg_ctrl.queue);
		card->creg_ctrl.q_depth++;
		card->creg_ctrl.active = 0;
		spin_unlock_bh(&card->creg_ctrl.lock);
	}
}

void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
	spin_lock_bh(&card->creg_ctrl.lock);
	if (!list_empty(&card->creg_ctrl.queue))
		creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	card->creg_ctrl.creg_wq =
			create_singlethread_workqueue(DRIVER_NAME"_creg");
	if (!card->creg_ctrl.creg_wq)
		return -ENOMEM;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

	return 0;
}

void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			"Canceled %d queue creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			"Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}


int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}