xref: /OK3568_Linux_fs/kernel/drivers/mtd/lpddr/lpddr_cmds.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * LPDDR flash memory device operations. This module provides read, write,
4*4882a593Smuzhiyun  * erase, lock/unlock support for LPDDR flash memories
5*4882a593Smuzhiyun  * (C) 2008 Korolev Alexey <akorolev@infradead.org>
6*4882a593Smuzhiyun  * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
7*4882a593Smuzhiyun  * Many thanks to Roman Borisov for initial enabling
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * TODO:
10*4882a593Smuzhiyun  * Implement VPP management
11*4882a593Smuzhiyun  * Implement XIP support
12*4882a593Smuzhiyun  * Implement OTP support
13*4882a593Smuzhiyun  */
14*4882a593Smuzhiyun #include <linux/mtd/pfow.h>
15*4882a593Smuzhiyun #include <linux/mtd/qinfo.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
20*4882a593Smuzhiyun 					size_t *retlen, u_char *buf);
21*4882a593Smuzhiyun static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
22*4882a593Smuzhiyun 				size_t len, size_t *retlen, const u_char *buf);
23*4882a593Smuzhiyun static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
24*4882a593Smuzhiyun 				unsigned long count, loff_t to, size_t *retlen);
25*4882a593Smuzhiyun static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
26*4882a593Smuzhiyun static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
27*4882a593Smuzhiyun static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
28*4882a593Smuzhiyun static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
29*4882a593Smuzhiyun 			size_t *retlen, void **mtdbuf, resource_size_t *phys);
30*4882a593Smuzhiyun static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
31*4882a593Smuzhiyun static int get_chip(struct map_info *map, struct flchip *chip, int mode);
32*4882a593Smuzhiyun static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
33*4882a593Smuzhiyun static void put_chip(struct map_info *map, struct flchip *chip);
34*4882a593Smuzhiyun 
lpddr_cmdset(struct map_info * map)35*4882a593Smuzhiyun struct mtd_info *lpddr_cmdset(struct map_info *map)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	struct lpddr_private *lpddr = map->fldrv_priv;
38*4882a593Smuzhiyun 	struct flchip_shared *shared;
39*4882a593Smuzhiyun 	struct flchip *chip;
40*4882a593Smuzhiyun 	struct mtd_info *mtd;
41*4882a593Smuzhiyun 	int numchips;
42*4882a593Smuzhiyun 	int i, j;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
45*4882a593Smuzhiyun 	if (!mtd)
46*4882a593Smuzhiyun 		return NULL;
47*4882a593Smuzhiyun 	mtd->priv = map;
48*4882a593Smuzhiyun 	mtd->type = MTD_NORFLASH;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	/* Fill in the default mtd operations */
51*4882a593Smuzhiyun 	mtd->_read = lpddr_read;
52*4882a593Smuzhiyun 	mtd->type = MTD_NORFLASH;
53*4882a593Smuzhiyun 	mtd->flags = MTD_CAP_NORFLASH;
54*4882a593Smuzhiyun 	mtd->flags &= ~MTD_BIT_WRITEABLE;
55*4882a593Smuzhiyun 	mtd->_erase = lpddr_erase;
56*4882a593Smuzhiyun 	mtd->_write = lpddr_write_buffers;
57*4882a593Smuzhiyun 	mtd->_writev = lpddr_writev;
58*4882a593Smuzhiyun 	mtd->_lock = lpddr_lock;
59*4882a593Smuzhiyun 	mtd->_unlock = lpddr_unlock;
60*4882a593Smuzhiyun 	if (map_is_linear(map)) {
61*4882a593Smuzhiyun 		mtd->_point = lpddr_point;
62*4882a593Smuzhiyun 		mtd->_unpoint = lpddr_unpoint;
63*4882a593Smuzhiyun 	}
64*4882a593Smuzhiyun 	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
65*4882a593Smuzhiyun 	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
66*4882a593Smuzhiyun 	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
69*4882a593Smuzhiyun 						GFP_KERNEL);
70*4882a593Smuzhiyun 	if (!shared) {
71*4882a593Smuzhiyun 		kfree(mtd);
72*4882a593Smuzhiyun 		return NULL;
73*4882a593Smuzhiyun 	}
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	chip = &lpddr->chips[0];
76*4882a593Smuzhiyun 	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
77*4882a593Smuzhiyun 	for (i = 0; i < numchips; i++) {
78*4882a593Smuzhiyun 		shared[i].writing = shared[i].erasing = NULL;
79*4882a593Smuzhiyun 		mutex_init(&shared[i].lock);
80*4882a593Smuzhiyun 		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
81*4882a593Smuzhiyun 			*chip = lpddr->chips[i];
82*4882a593Smuzhiyun 			chip->start += j << lpddr->chipshift;
83*4882a593Smuzhiyun 			chip->oldstate = chip->state = FL_READY;
84*4882a593Smuzhiyun 			chip->priv = &shared[i];
85*4882a593Smuzhiyun 			/* those should be reset too since
86*4882a593Smuzhiyun 			   they create memory references. */
87*4882a593Smuzhiyun 			init_waitqueue_head(&chip->wq);
88*4882a593Smuzhiyun 			mutex_init(&chip->mutex);
89*4882a593Smuzhiyun 			chip++;
90*4882a593Smuzhiyun 		}
91*4882a593Smuzhiyun 	}
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	return mtd;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun EXPORT_SYMBOL(lpddr_cmdset);
96*4882a593Smuzhiyun 
print_drs_error(unsigned int dsr)97*4882a593Smuzhiyun static void print_drs_error(unsigned int dsr)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun 	int prog_status = (dsr & DSR_RPS) >> 8;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	if (!(dsr & DSR_AVAILABLE))
102*4882a593Smuzhiyun 		pr_notice("DSR.15: (0) Device not Available\n");
103*4882a593Smuzhiyun 	if ((prog_status & 0x03) == 0x03)
104*4882a593Smuzhiyun 		pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
105*4882a593Smuzhiyun 	else if (prog_status & 0x02)
106*4882a593Smuzhiyun 		pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
107*4882a593Smuzhiyun 	else if (prog_status &  0x01)
108*4882a593Smuzhiyun 		pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
109*4882a593Smuzhiyun 	if (!(dsr & DSR_READY_STATUS))
110*4882a593Smuzhiyun 		pr_notice("DSR.7: (0) Device is Busy\n");
111*4882a593Smuzhiyun 	if (dsr & DSR_ESS)
112*4882a593Smuzhiyun 		pr_notice("DSR.6: (1) Erase Suspended\n");
113*4882a593Smuzhiyun 	if (dsr & DSR_ERASE_STATUS)
114*4882a593Smuzhiyun 		pr_notice("DSR.5: (1) Erase/Blank check error\n");
115*4882a593Smuzhiyun 	if (dsr & DSR_PROGRAM_STATUS)
116*4882a593Smuzhiyun 		pr_notice("DSR.4: (1) Program Error\n");
117*4882a593Smuzhiyun 	if (dsr & DSR_VPPS)
118*4882a593Smuzhiyun 		pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
119*4882a593Smuzhiyun 	if (dsr & DSR_PSS)
120*4882a593Smuzhiyun 		pr_notice("DSR.2: (1) Program suspended\n");
121*4882a593Smuzhiyun 	if (dsr & DSR_DPS)
122*4882a593Smuzhiyun 		pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun 
/*
 * wait_for_ready - poll the PFOW Device Status Register until the
 * current operation completes.
 * @map:  flash map
 * @chip: partition running the operation; chip->mutex must be held
 * @chip_op_time: expected operation time in microseconds (0 = unknown,
 *	in which case a 500000us default timeout budget is used)
 *
 * Polls DSR ready (DSR_READY_STATUS), dropping chip->mutex while
 * sleeping/busy-waiting so other contexts can suspend the operation.
 * If a suspend/resume happened meanwhile, the timeout budget is reset.
 * On completion, DSR error bits are cleared and logged.
 *
 * Returns 0 on success, -ETIME on timeout, -EIO on DSR error.
 * Always leaves chip->state = FL_READY with chip->mutex held.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			/* Remaining wait is below one tick: busy-wait in
			   1us steps, yielding the CPU between steps. */
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended)  {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
197*4882a593Smuzhiyun 
/*
 * get_chip - obtain the right to run an operation of type @mode on
 * @chip.
 *
 * Caller holds chip->mutex.  For FL_WRITING/FL_ERASING the write/erase
 * engine shared by all partitions of the physical chip must also be
 * acquired through chip->priv (the flchip_shared); contention is
 * resolved in the current owner's context, as described in the block
 * comment below.  chip->mutex may be dropped and re-taken on the way.
 *
 * Returns 0 on success or a negative error from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	/* With engine ownership settled, drive this partition ready. */
	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
293*4882a593Smuzhiyun 
/*
 * chip_ready - bring @chip into a state where an operation of type
 * @mode can start.
 *
 * Caller holds chip->mutex; the mutex is dropped and re-taken while
 * sleeping.  A running erase is suspended if the hardware supports it
 * and @mode only needs to read (FL_READY/FL_POINT).
 *
 * Returns 0 when ready, -EAGAIN after having slept (caller must
 * re-evaluate the situation), or -EIO if suspending the erase failed.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend for read-type access, and only when the
		   device advertises erase-suspend support. */
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here.  */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		/* Chip is busy with something we cannot preempt: wait
		   until put_chip() wakes us, then ask the caller to
		   retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
347*4882a593Smuzhiyun 
/*
 * put_chip - release a partition obtained with get_chip().
 *
 * Caller holds chip->mutex.  Hands the shared write/erase engine back
 * to a suspended owner if there is one (recursing into put_chip() in
 * the loaner's context), resumes a suspended erase when we own it, and
 * wakes anyone sleeping on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	/* Resume whatever we suspended on the way in. */
	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
402*4882a593Smuzhiyun 
do_write_buffer(struct map_info * map,struct flchip * chip,unsigned long adr,const struct kvec ** pvec,unsigned long * pvec_seek,int len)403*4882a593Smuzhiyun static int do_write_buffer(struct map_info *map, struct flchip *chip,
404*4882a593Smuzhiyun 			unsigned long adr, const struct kvec **pvec,
405*4882a593Smuzhiyun 			unsigned long *pvec_seek, int len)
406*4882a593Smuzhiyun {
407*4882a593Smuzhiyun 	struct lpddr_private *lpddr = map->fldrv_priv;
408*4882a593Smuzhiyun 	map_word datum;
409*4882a593Smuzhiyun 	int ret, wbufsize, word_gap, words;
410*4882a593Smuzhiyun 	const struct kvec *vec;
411*4882a593Smuzhiyun 	unsigned long vec_seek;
412*4882a593Smuzhiyun 	unsigned long prog_buf_ofs;
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	wbufsize = 1 << lpddr->qinfo->BufSizeShift;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	mutex_lock(&chip->mutex);
417*4882a593Smuzhiyun 	ret = get_chip(map, chip, FL_WRITING);
418*4882a593Smuzhiyun 	if (ret) {
419*4882a593Smuzhiyun 		mutex_unlock(&chip->mutex);
420*4882a593Smuzhiyun 		return ret;
421*4882a593Smuzhiyun 	}
422*4882a593Smuzhiyun 	/* Figure out the number of words to write */
423*4882a593Smuzhiyun 	word_gap = (-adr & (map_bankwidth(map)-1));
424*4882a593Smuzhiyun 	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
425*4882a593Smuzhiyun 	if (!word_gap) {
426*4882a593Smuzhiyun 		words--;
427*4882a593Smuzhiyun 	} else {
428*4882a593Smuzhiyun 		word_gap = map_bankwidth(map) - word_gap;
429*4882a593Smuzhiyun 		adr -= word_gap;
430*4882a593Smuzhiyun 		datum = map_word_ff(map);
431*4882a593Smuzhiyun 	}
432*4882a593Smuzhiyun 	/* Write data */
433*4882a593Smuzhiyun 	/* Get the program buffer offset from PFOW register data first*/
434*4882a593Smuzhiyun 	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
435*4882a593Smuzhiyun 				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
436*4882a593Smuzhiyun 	vec = *pvec;
437*4882a593Smuzhiyun 	vec_seek = *pvec_seek;
438*4882a593Smuzhiyun 	do {
439*4882a593Smuzhiyun 		int n = map_bankwidth(map) - word_gap;
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 		if (n > vec->iov_len - vec_seek)
442*4882a593Smuzhiyun 			n = vec->iov_len - vec_seek;
443*4882a593Smuzhiyun 		if (n > len)
444*4882a593Smuzhiyun 			n = len;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 		if (!word_gap && (len < map_bankwidth(map)))
447*4882a593Smuzhiyun 			datum = map_word_ff(map);
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 		datum = map_word_load_partial(map, datum,
450*4882a593Smuzhiyun 				vec->iov_base + vec_seek, word_gap, n);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 		len -= n;
453*4882a593Smuzhiyun 		word_gap += n;
454*4882a593Smuzhiyun 		if (!len || word_gap == map_bankwidth(map)) {
455*4882a593Smuzhiyun 			map_write(map, datum, prog_buf_ofs);
456*4882a593Smuzhiyun 			prog_buf_ofs += map_bankwidth(map);
457*4882a593Smuzhiyun 			word_gap = 0;
458*4882a593Smuzhiyun 		}
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 		vec_seek += n;
461*4882a593Smuzhiyun 		if (vec_seek == vec->iov_len) {
462*4882a593Smuzhiyun 			vec++;
463*4882a593Smuzhiyun 			vec_seek = 0;
464*4882a593Smuzhiyun 		}
465*4882a593Smuzhiyun 	} while (len);
466*4882a593Smuzhiyun 	*pvec = vec;
467*4882a593Smuzhiyun 	*pvec_seek = vec_seek;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	/* GO GO GO */
470*4882a593Smuzhiyun 	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
471*4882a593Smuzhiyun 	chip->state = FL_WRITING;
472*4882a593Smuzhiyun 	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
473*4882a593Smuzhiyun 	if (ret)	{
474*4882a593Smuzhiyun 		printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
475*4882a593Smuzhiyun 			map->name, ret, adr);
476*4882a593Smuzhiyun 		goto out;
477*4882a593Smuzhiyun 	}
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun  out:	put_chip(map, chip);
480*4882a593Smuzhiyun 	mutex_unlock(&chip->mutex);
481*4882a593Smuzhiyun 	return ret;
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun 
/*
 * do_erase_oneblock - erase the single erase block containing @adr.
 * @mtd: mtd device
 * @adr: device address within the block to erase
 *
 * Acquires the owning partition for FL_ERASING, issues
 * LPDDR_BLOCK_ERASE and waits for completion (budget scaled from
 * qinfo->BlockEraseTime).
 *
 * Returns 0 on success or a negative error from get_chip() /
 * wait_for_ready().
 */
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret)
		printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
			map->name, ret, adr);
	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
510*4882a593Smuzhiyun 
/*
 * lpddr_read - mtd ->_read handler.
 *
 * Acquires the owning partition for FL_READY, copies @len bytes at
 * device address @adr into @buf with map_copy_from(), and reports the
 * full length via @retlen on success.
 */
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int err;

	mutex_lock(&chip->mutex);
	err = get_chip(map, chip, FL_READY);
	if (!err) {
		map_copy_from(map, buf, adr, len);
		*retlen = len;
		put_chip(map, chip);
	}
	mutex_unlock(&chip->mutex);
	return err;
}
534*4882a593Smuzhiyun 
/*
 * lpddr_point - mtd ->_point handler: give the caller a direct pointer
 * into the (linearly mapped) flash instead of copying.
 *
 * Walks the chips covered by [adr, adr+len), taking each with
 * get_chip(FL_POINT) and bumping its ref_point_counter.  Stops early
 * (without error) when the range runs past the last chip, when chips
 * are not virtually contiguous, or when get_chip() fails; @retlen
 * reflects only the successfully pointed span.
 *
 * NOTE(review): *retlen is only incremented here — assumes the caller
 * (mtd core) zeroed it beforehand; confirm against mtd_point().
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	/* Pointing requires a kernel mapping of the flash. */
	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* Clamp this chip's contribution at the chip boundary. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
587*4882a593Smuzhiyun 
/*
 * lpddr_unpoint - mtd ->_unpoint handler: release a range previously
 * handed out by lpddr_point().
 *
 * Walks the same chips, dropping each chip's ref_point_counter and
 * restoring FL_READY when the count reaches zero, then put_chip()s it.
 * Unpointing a chip that is not in FL_POINT state is reported and
 * yields -EINVAL (the walk still continues).
 */
static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		/* Clamp this chip's contribution at the chip boundary. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on non"
					"pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
632*4882a593Smuzhiyun 
/*
 * lpddr_write_buffers - mtd ->_write handler.
 * Wraps the single (buf, len) pair in a one-element kvec and delegates
 * the real work to lpddr_writev().
 */
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec = {
		.iov_base = (void *) buf,
		.iov_len = len,
	};

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 
/*
 * MTD writev handler: write a gather list of @count kvecs to flash
 * starting at device offset @to.  Data is fed to do_write_buffer() in
 * chunks that never cross a program-buffer (wbufsize) boundary;
 * do_write_buffer() consumes the kvec array incrementally through the
 * shared @vecs/@vec_seek cursor across iterations.
 *
 * Returns 0 on success or the first error from do_write_buffer().
 * *retlen is incremented (not assigned) by the number of bytes
 * written, so the caller is expected to have initialized it.
 */
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	/* Size of the chip's internal program buffer, in bytes. */
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	size_t len = 0;

	/* Total bytes requested across all vectors. */
	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	/*
	 * NOTE(review): chipnum is computed once and never advanced while
	 * ofs grows, and ofs stays an absolute device offset — presumably
	 * a single write never spans a chip boundary here; confirm for
	 * multi-chip configurations.
	 */
	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}
691*4882a593Smuzhiyun 
lpddr_erase(struct mtd_info * mtd,struct erase_info * instr)692*4882a593Smuzhiyun static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
693*4882a593Smuzhiyun {
694*4882a593Smuzhiyun 	unsigned long ofs, len;
695*4882a593Smuzhiyun 	int ret;
696*4882a593Smuzhiyun 	struct map_info *map = mtd->priv;
697*4882a593Smuzhiyun 	struct lpddr_private *lpddr = map->fldrv_priv;
698*4882a593Smuzhiyun 	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	ofs = instr->addr;
701*4882a593Smuzhiyun 	len = instr->len;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	while (len > 0) {
704*4882a593Smuzhiyun 		ret = do_erase_oneblock(mtd, ofs);
705*4882a593Smuzhiyun 		if (ret)
706*4882a593Smuzhiyun 			return ret;
707*4882a593Smuzhiyun 		ofs += size;
708*4882a593Smuzhiyun 		len -= size;
709*4882a593Smuzhiyun 	}
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 	return 0;
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun #define DO_XXLOCK_LOCK		1
715*4882a593Smuzhiyun #define DO_XXLOCK_UNLOCK	2
/*
 * Common worker for lpddr_lock()/lpddr_unlock(): issue the PFOW block
 * lock or unlock command for the range [adr, adr + len) and wait for
 * the chip to become ready again.
 *
 * @thunk selects the operation (DO_XXLOCK_LOCK or DO_XXLOCK_UNLOCK);
 * any other value is a driver bug.  Returns 0 on success or a
 * negative error code from get_chip()/wait_for_ready().
 */
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	int ret = 0;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret)
		/* Report the operation that actually failed; the old
		 * message claimed "unlock" even on the lock path. */
		printk(KERN_ERR "%s: block %s error status %d\n",
				map->name,
				thunk == DO_XXLOCK_LOCK ? "lock" : "unlock",
				ret);

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
750*4882a593Smuzhiyun 
/*
 * MTD lock handler: lock the blocks covering [ofs, ofs + len).
 * NOTE(review): len is narrowed to 32 bits by do_xxlock()'s
 * uint32_t parameter.
 */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
755*4882a593Smuzhiyun 
/*
 * MTD unlock handler: unlock the blocks covering [ofs, ofs + len).
 * NOTE(review): len is narrowed to 32 bits by do_xxlock()'s
 * uint32_t parameter.
 */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun MODULE_LICENSE("GPL");
762*4882a593Smuzhiyun MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
763*4882a593Smuzhiyun MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
764