// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 */

#include "sym_glue.h"
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun /*
30*4882a593Smuzhiyun * Simple power of two buddy-like generic allocator.
31*4882a593Smuzhiyun * Provides naturally aligned memory chunks.
32*4882a593Smuzhiyun *
33*4882a593Smuzhiyun * This simple code is not intended to be fast, but to
34*4882a593Smuzhiyun * provide power of 2 aligned memory allocations.
35*4882a593Smuzhiyun * Since the SCRIPTS processor only supplies 8 bit arithmetic,
36*4882a593Smuzhiyun * this allocator allows simple and fast address calculations
37*4882a593Smuzhiyun * from the SCRIPTS code. In addition, cache line alignment
38*4882a593Smuzhiyun * is guaranteed for power of 2 cache line size.
39*4882a593Smuzhiyun *
40*4882a593Smuzhiyun * This allocator has been developed for the Linux sym53c8xx
41*4882a593Smuzhiyun * driver, since this O/S does not provide naturally aligned
42*4882a593Smuzhiyun * allocations.
43*4882a593Smuzhiyun * It has the advantage of allowing the driver to use private
44*4882a593Smuzhiyun * pages of memory that will be useful if we ever need to deal
45*4882a593Smuzhiyun * with IO MMUs for PCI.
46*4882a593Smuzhiyun */
___sym_malloc(m_pool_p mp,int size)47*4882a593Smuzhiyun static void *___sym_malloc(m_pool_p mp, int size)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun int i = 0;
50*4882a593Smuzhiyun int s = (1 << SYM_MEM_SHIFT);
51*4882a593Smuzhiyun int j;
52*4882a593Smuzhiyun void *a;
53*4882a593Smuzhiyun m_link_p h = mp->h;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun if (size > SYM_MEM_CLUSTER_SIZE)
56*4882a593Smuzhiyun return NULL;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun while (size > s) {
59*4882a593Smuzhiyun s <<= 1;
60*4882a593Smuzhiyun ++i;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun j = i;
64*4882a593Smuzhiyun while (!h[j].next) {
65*4882a593Smuzhiyun if (s == SYM_MEM_CLUSTER_SIZE) {
66*4882a593Smuzhiyun h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
67*4882a593Smuzhiyun if (h[j].next)
68*4882a593Smuzhiyun h[j].next->next = NULL;
69*4882a593Smuzhiyun break;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun ++j;
72*4882a593Smuzhiyun s <<= 1;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun a = h[j].next;
75*4882a593Smuzhiyun if (a) {
76*4882a593Smuzhiyun h[j].next = h[j].next->next;
77*4882a593Smuzhiyun while (j > i) {
78*4882a593Smuzhiyun j -= 1;
79*4882a593Smuzhiyun s >>= 1;
80*4882a593Smuzhiyun h[j].next = (m_link_p) (a+s);
81*4882a593Smuzhiyun h[j].next->next = NULL;
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun #ifdef DEBUG
85*4882a593Smuzhiyun printf("___sym_malloc(%d) = %p\n", size, (void *) a);
86*4882a593Smuzhiyun #endif
87*4882a593Smuzhiyun return a;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun /*
91*4882a593Smuzhiyun * Counter-part of the generic allocator.
92*4882a593Smuzhiyun */
___sym_mfree(m_pool_p mp,void * ptr,int size)93*4882a593Smuzhiyun static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun int i = 0;
96*4882a593Smuzhiyun int s = (1 << SYM_MEM_SHIFT);
97*4882a593Smuzhiyun m_link_p q;
98*4882a593Smuzhiyun unsigned long a, b;
99*4882a593Smuzhiyun m_link_p h = mp->h;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun #ifdef DEBUG
102*4882a593Smuzhiyun printf("___sym_mfree(%p, %d)\n", ptr, size);
103*4882a593Smuzhiyun #endif
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun if (size > SYM_MEM_CLUSTER_SIZE)
106*4882a593Smuzhiyun return;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun while (size > s) {
109*4882a593Smuzhiyun s <<= 1;
110*4882a593Smuzhiyun ++i;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun a = (unsigned long)ptr;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun while (1) {
116*4882a593Smuzhiyun if (s == SYM_MEM_CLUSTER_SIZE) {
117*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
118*4882a593Smuzhiyun M_FREE_MEM_CLUSTER((void *)a);
119*4882a593Smuzhiyun #else
120*4882a593Smuzhiyun ((m_link_p) a)->next = h[i].next;
121*4882a593Smuzhiyun h[i].next = (m_link_p) a;
122*4882a593Smuzhiyun #endif
123*4882a593Smuzhiyun break;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun b = a ^ s;
126*4882a593Smuzhiyun q = &h[i];
127*4882a593Smuzhiyun while (q->next && q->next != (m_link_p) b) {
128*4882a593Smuzhiyun q = q->next;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun if (!q->next) {
131*4882a593Smuzhiyun ((m_link_p) a)->next = h[i].next;
132*4882a593Smuzhiyun h[i].next = (m_link_p) a;
133*4882a593Smuzhiyun break;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun q->next = q->next->next;
136*4882a593Smuzhiyun a = a & b;
137*4882a593Smuzhiyun s <<= 1;
138*4882a593Smuzhiyun ++i;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /*
143*4882a593Smuzhiyun * Verbose and zeroing allocator that wrapps to the generic allocator.
144*4882a593Smuzhiyun */
__sym_calloc2(m_pool_p mp,int size,char * name,int uflags)145*4882a593Smuzhiyun static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun void *p;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun p = ___sym_malloc(mp, size);
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun if (DEBUG_FLAGS & DEBUG_ALLOC) {
152*4882a593Smuzhiyun printf ("new %-10s[%4d] @%p.\n", name, size, p);
153*4882a593Smuzhiyun }
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun if (p)
156*4882a593Smuzhiyun memset(p, 0, size);
157*4882a593Smuzhiyun else if (uflags & SYM_MEM_WARN)
158*4882a593Smuzhiyun printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
159*4882a593Smuzhiyun return p;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN)
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun /*
164*4882a593Smuzhiyun * Its counter-part.
165*4882a593Smuzhiyun */
__sym_mfree(m_pool_p mp,void * ptr,int size,char * name)166*4882a593Smuzhiyun static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun if (DEBUG_FLAGS & DEBUG_ALLOC)
169*4882a593Smuzhiyun printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun ___sym_mfree(mp, ptr, size);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /*
175*4882a593Smuzhiyun * Default memory pool we donnot need to involve in DMA.
176*4882a593Smuzhiyun *
177*4882a593Smuzhiyun * With DMA abstraction, we use functions (methods), to
178*4882a593Smuzhiyun * distinguish between non DMAable memory and DMAable memory.
179*4882a593Smuzhiyun */
___mp0_get_mem_cluster(m_pool_p mp)180*4882a593Smuzhiyun static void *___mp0_get_mem_cluster(m_pool_p mp)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun void *m = sym_get_mem_cluster();
183*4882a593Smuzhiyun if (m)
184*4882a593Smuzhiyun ++mp->nump;
185*4882a593Smuzhiyun return m;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
___mp0_free_mem_cluster(m_pool_p mp,void * m)189*4882a593Smuzhiyun static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun sym_free_mem_cluster(m);
192*4882a593Smuzhiyun --mp->nump;
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun #else
195*4882a593Smuzhiyun #define ___mp0_free_mem_cluster NULL
196*4882a593Smuzhiyun #endif
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun static struct sym_m_pool mp0 = {
199*4882a593Smuzhiyun NULL,
200*4882a593Smuzhiyun ___mp0_get_mem_cluster,
201*4882a593Smuzhiyun ___mp0_free_mem_cluster
202*4882a593Smuzhiyun };
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun /*
205*4882a593Smuzhiyun * Methods that maintains DMAable pools according to user allocations.
206*4882a593Smuzhiyun * New pools are created on the fly when a new pool id is provided.
207*4882a593Smuzhiyun * They are deleted on the fly when they get emptied.
208*4882a593Smuzhiyun */
209*4882a593Smuzhiyun /* Get a memory cluster that matches the DMA constraints of a given pool */
___get_dma_mem_cluster(m_pool_p mp)210*4882a593Smuzhiyun static void * ___get_dma_mem_cluster(m_pool_p mp)
211*4882a593Smuzhiyun {
212*4882a593Smuzhiyun m_vtob_p vbp;
213*4882a593Smuzhiyun void *vaddr;
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
216*4882a593Smuzhiyun if (!vbp)
217*4882a593Smuzhiyun goto out_err;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
220*4882a593Smuzhiyun if (vaddr) {
221*4882a593Smuzhiyun int hc = VTOB_HASH_CODE(vaddr);
222*4882a593Smuzhiyun vbp->next = mp->vtob[hc];
223*4882a593Smuzhiyun mp->vtob[hc] = vbp;
224*4882a593Smuzhiyun ++mp->nump;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun return vaddr;
227*4882a593Smuzhiyun out_err:
228*4882a593Smuzhiyun return NULL;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
232*4882a593Smuzhiyun /* Free a memory cluster and associated resources for DMA */
___free_dma_mem_cluster(m_pool_p mp,void * m)233*4882a593Smuzhiyun static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun m_vtob_p *vbpp, vbp;
236*4882a593Smuzhiyun int hc = VTOB_HASH_CODE(m);
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun vbpp = &mp->vtob[hc];
239*4882a593Smuzhiyun while (*vbpp && (*vbpp)->vaddr != m)
240*4882a593Smuzhiyun vbpp = &(*vbpp)->next;
241*4882a593Smuzhiyun if (*vbpp) {
242*4882a593Smuzhiyun vbp = *vbpp;
243*4882a593Smuzhiyun *vbpp = (*vbpp)->next;
244*4882a593Smuzhiyun sym_m_free_dma_mem_cluster(mp, vbp);
245*4882a593Smuzhiyun __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
246*4882a593Smuzhiyun --mp->nump;
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun #endif
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun /* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
___get_dma_pool(m_pool_ident_t dev_dmat)252*4882a593Smuzhiyun static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun m_pool_p mp;
255*4882a593Smuzhiyun for (mp = mp0.next;
256*4882a593Smuzhiyun mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
257*4882a593Smuzhiyun mp = mp->next);
258*4882a593Smuzhiyun return mp;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun /* Create a new memory DMAable pool (when fetch failed) */
___cre_dma_pool(m_pool_ident_t dev_dmat)262*4882a593Smuzhiyun static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
265*4882a593Smuzhiyun if (mp) {
266*4882a593Smuzhiyun mp->dev_dmat = dev_dmat;
267*4882a593Smuzhiyun mp->get_mem_cluster = ___get_dma_mem_cluster;
268*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
269*4882a593Smuzhiyun mp->free_mem_cluster = ___free_dma_mem_cluster;
270*4882a593Smuzhiyun #endif
271*4882a593Smuzhiyun mp->next = mp0.next;
272*4882a593Smuzhiyun mp0.next = mp;
273*4882a593Smuzhiyun return mp;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun return NULL;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
279*4882a593Smuzhiyun /* Destroy a DMAable memory pool (when got emptied) */
___del_dma_pool(m_pool_p p)280*4882a593Smuzhiyun static void ___del_dma_pool(m_pool_p p)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun m_pool_p *pp = &mp0.next;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun while (*pp && *pp != p)
285*4882a593Smuzhiyun pp = &(*pp)->next;
286*4882a593Smuzhiyun if (*pp) {
287*4882a593Smuzhiyun *pp = (*pp)->next;
288*4882a593Smuzhiyun __sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun #endif
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun /* This lock protects only the memory allocation/free. */
294*4882a593Smuzhiyun static DEFINE_SPINLOCK(sym53c8xx_lock);
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun /*
297*4882a593Smuzhiyun * Actual allocator for DMAable memory.
298*4882a593Smuzhiyun */
__sym_calloc_dma(m_pool_ident_t dev_dmat,int size,char * name)299*4882a593Smuzhiyun void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun unsigned long flags;
302*4882a593Smuzhiyun m_pool_p mp;
303*4882a593Smuzhiyun void *m = NULL;
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun spin_lock_irqsave(&sym53c8xx_lock, flags);
306*4882a593Smuzhiyun mp = ___get_dma_pool(dev_dmat);
307*4882a593Smuzhiyun if (!mp)
308*4882a593Smuzhiyun mp = ___cre_dma_pool(dev_dmat);
309*4882a593Smuzhiyun if (!mp)
310*4882a593Smuzhiyun goto out;
311*4882a593Smuzhiyun m = __sym_calloc(mp, size, name);
312*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
313*4882a593Smuzhiyun if (!mp->nump)
314*4882a593Smuzhiyun ___del_dma_pool(mp);
315*4882a593Smuzhiyun #endif
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun out:
318*4882a593Smuzhiyun spin_unlock_irqrestore(&sym53c8xx_lock, flags);
319*4882a593Smuzhiyun return m;
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun
__sym_mfree_dma(m_pool_ident_t dev_dmat,void * m,int size,char * name)322*4882a593Smuzhiyun void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun unsigned long flags;
325*4882a593Smuzhiyun m_pool_p mp;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun spin_lock_irqsave(&sym53c8xx_lock, flags);
328*4882a593Smuzhiyun mp = ___get_dma_pool(dev_dmat);
329*4882a593Smuzhiyun if (!mp)
330*4882a593Smuzhiyun goto out;
331*4882a593Smuzhiyun __sym_mfree(mp, m, size, name);
332*4882a593Smuzhiyun #ifdef SYM_MEM_FREE_UNUSED
333*4882a593Smuzhiyun if (!mp->nump)
334*4882a593Smuzhiyun ___del_dma_pool(mp);
335*4882a593Smuzhiyun #endif
336*4882a593Smuzhiyun out:
337*4882a593Smuzhiyun spin_unlock_irqrestore(&sym53c8xx_lock, flags);
338*4882a593Smuzhiyun }
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun /*
341*4882a593Smuzhiyun * Actual virtual to bus physical address translator
342*4882a593Smuzhiyun * for 32 bit addressable DMAable memory.
343*4882a593Smuzhiyun */
__vtobus(m_pool_ident_t dev_dmat,void * m)344*4882a593Smuzhiyun dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun unsigned long flags;
347*4882a593Smuzhiyun m_pool_p mp;
348*4882a593Smuzhiyun int hc = VTOB_HASH_CODE(m);
349*4882a593Smuzhiyun m_vtob_p vp = NULL;
350*4882a593Smuzhiyun void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
351*4882a593Smuzhiyun dma_addr_t b;
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun spin_lock_irqsave(&sym53c8xx_lock, flags);
354*4882a593Smuzhiyun mp = ___get_dma_pool(dev_dmat);
355*4882a593Smuzhiyun if (mp) {
356*4882a593Smuzhiyun vp = mp->vtob[hc];
357*4882a593Smuzhiyun while (vp && vp->vaddr != a)
358*4882a593Smuzhiyun vp = vp->next;
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun if (!vp)
361*4882a593Smuzhiyun panic("sym: VTOBUS FAILED!\n");
362*4882a593Smuzhiyun b = vp->baddr + (m - a);
363*4882a593Smuzhiyun spin_unlock_irqrestore(&sym53c8xx_lock, flags);
364*4882a593Smuzhiyun return b;
365*4882a593Smuzhiyun }
366