xref: /OK3568_Linux_fs/u-boot/drivers/block/blkcache.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

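/*
 * One cache entry: a contiguous run of 'blkcnt' blocks of size 'blksz',
 * starting at 'start' on device (iftype, devnum), copied into 'cache'.
 */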
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

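/* Cache entries, kept in MRU order: most recently used at the head. */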
static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 2,
	.max_entries = 32
};

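/*
 * Find a cache node that fully covers [start, start + blkcnt) for the given
 * device and block size, promote it to the head of the list (MRU), and
 * return it. Returns NULL (0) if no such node exists.
 */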
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return 0;
}

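/*
 * Copy the requested blocks into 'buffer' if they are cached. Returns 1 on
 * a cache hit, 0 on a miss; hit/miss counts are recorded in _stats.
 */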
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

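/*
 * Add a block range to the cache after a device read. Requests larger than
 * max_blocks_per_entry are not cached. When the cache is full, the least
 * recently used entry is evicted and its buffer is reused if large enough,
 * otherwise reallocated.
 */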
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = 0;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = 0;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

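/*
 * Remove and free every cache entry belonging to device (iftype, devnum);
 * callers are expected to invoke this when the device contents may have
 * changed, since nothing here detects stale data on its own.
 */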
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

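/*
 * Set the cache limits. If either limit changes, all existing entries are
 * freed; the hit/miss counters are reset unconditionally.
 */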
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

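/* Copy the current statistics to 'stats' and reset the hit/miss counters. */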
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}