1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Driver for the HP iLO management processor.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
6*4882a593Smuzhiyun * David Altobelli <david.altobelli@hpe.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/fs.h>
12*4882a593Smuzhiyun #include <linux/pci.h>
13*4882a593Smuzhiyun #include <linux/interrupt.h>
14*4882a593Smuzhiyun #include <linux/ioport.h>
15*4882a593Smuzhiyun #include <linux/device.h>
16*4882a593Smuzhiyun #include <linux/file.h>
17*4882a593Smuzhiyun #include <linux/cdev.h>
18*4882a593Smuzhiyun #include <linux/sched.h>
19*4882a593Smuzhiyun #include <linux/spinlock.h>
20*4882a593Smuzhiyun #include <linux/delay.h>
21*4882a593Smuzhiyun #include <linux/uaccess.h>
22*4882a593Smuzhiyun #include <linux/io.h>
23*4882a593Smuzhiyun #include <linux/wait.h>
24*4882a593Smuzhiyun #include <linux/poll.h>
25*4882a593Smuzhiyun #include <linux/slab.h>
26*4882a593Smuzhiyun #include "hpilo.h"
27*4882a593Smuzhiyun
/* device class and char-dev major — NOTE(review): presumably assigned at
 * module init; the init code is not in this chunk, confirm there. */
static struct class *ilo_class;
static unsigned int ilo_major;
/* number of channel control blocks (ccbs) exposed per iLO device */
static unsigned int max_ccb = 16;
/* per-device slot bookkeeping — usage not visible in this chunk */
static char ilo_hwdev[MAX_ILO_DEV];
/* subsystem ids the driver must NOT bind, despite a matching device id */
static const struct pci_device_id ilo_blacklist[] = {
	/* auxiliary iLO */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
	/* CL */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
	{}
};
39*4882a593Smuzhiyun
get_entry_id(int entry)40*4882a593Smuzhiyun static inline int get_entry_id(int entry)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun
get_entry_len(int entry)45*4882a593Smuzhiyun static inline int get_entry_len(int entry)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun
mk_entry(int id,int len)50*4882a593Smuzhiyun static inline int mk_entry(int id, int len)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
53*4882a593Smuzhiyun return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
desc_mem_sz(int nr_entry)56*4882a593Smuzhiyun static inline int desc_mem_sz(int nr_entry)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun return nr_entry << L2_QENTRY_SZ;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /*
62*4882a593Smuzhiyun * FIFO queues, shared with hardware.
63*4882a593Smuzhiyun *
64*4882a593Smuzhiyun * If a queue has empty slots, an entry is added to the queue tail,
65*4882a593Smuzhiyun * and that entry is marked as occupied.
66*4882a593Smuzhiyun * Entries can be dequeued from the head of the list, when the device
67*4882a593Smuzhiyun * has marked the entry as consumed.
68*4882a593Smuzhiyun *
69*4882a593Smuzhiyun * Returns true on successful queue/dequeue, false on failure.
70*4882a593Smuzhiyun */
/*
 * Add @entry at the tail of the fifo behind @fifobar.
 * The slot after tail must not be marked occupied (ENTRY_MASK_O) for
 * the queue to accept another entry; the new entry is stored with the
 * fifo's merge bits (the occupied mark) OR'd in.
 * Returns 1 on success, 0 if the fifo was full.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	/* fifo_lock serializes all head/tail manipulation on this device */
	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		/* strip state bits from the entry, then merge in ours */
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
			(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
89*4882a593Smuzhiyun
/*
 * Remove the entry at the head of the fifo behind @fifobar.
 * Only an entry the device has marked consumed (ENTRY_MASK_C) can be
 * dequeued; its value (minus state bits) is returned through @entry
 * (which may be NULL).  The slot is released by writing it back with
 * all state bits set plus one — NOTE(review): the +1 presumably bumps
 * a counter field packed in the entry; confirm against hpilo.h.
 * Returns 1 on success, 0 if the head entry is not yet consumed.
 */
static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		/* hand the slot back to the producer side */
		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
			(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
112*4882a593Smuzhiyun
fifo_check_recv(struct ilo_hwinfo * hw,char * fifobar)113*4882a593Smuzhiyun static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
116*4882a593Smuzhiyun unsigned long flags;
117*4882a593Smuzhiyun int ret = 0;
118*4882a593Smuzhiyun u64 c;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun spin_lock_irqsave(&hw->fifo_lock, flags);
121*4882a593Smuzhiyun c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
122*4882a593Smuzhiyun if (c & ENTRY_MASK_C)
123*4882a593Smuzhiyun ret = 1;
124*4882a593Smuzhiyun spin_unlock_irqrestore(&hw->fifo_lock, flags);
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun return ret;
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
ilo_pkt_enqueue(struct ilo_hwinfo * hw,struct ccb * ccb,int dir,int id,int len)129*4882a593Smuzhiyun static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
130*4882a593Smuzhiyun int dir, int id, int len)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun char *fifobar;
133*4882a593Smuzhiyun int entry;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if (dir == SENDQ)
136*4882a593Smuzhiyun fifobar = ccb->ccb_u1.send_fifobar;
137*4882a593Smuzhiyun else
138*4882a593Smuzhiyun fifobar = ccb->ccb_u3.recv_fifobar;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun entry = mk_entry(id, len);
141*4882a593Smuzhiyun return fifo_enqueue(hw, fifobar, entry);
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
ilo_pkt_dequeue(struct ilo_hwinfo * hw,struct ccb * ccb,int dir,int * id,int * len,void ** pkt)144*4882a593Smuzhiyun static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
145*4882a593Smuzhiyun int dir, int *id, int *len, void **pkt)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun char *fifobar, *desc;
148*4882a593Smuzhiyun int entry = 0, pkt_id = 0;
149*4882a593Smuzhiyun int ret;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun if (dir == SENDQ) {
152*4882a593Smuzhiyun fifobar = ccb->ccb_u1.send_fifobar;
153*4882a593Smuzhiyun desc = ccb->ccb_u2.send_desc;
154*4882a593Smuzhiyun } else {
155*4882a593Smuzhiyun fifobar = ccb->ccb_u3.recv_fifobar;
156*4882a593Smuzhiyun desc = ccb->ccb_u4.recv_desc;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun ret = fifo_dequeue(hw, fifobar, &entry);
160*4882a593Smuzhiyun if (ret) {
161*4882a593Smuzhiyun pkt_id = get_entry_id(entry);
162*4882a593Smuzhiyun if (id)
163*4882a593Smuzhiyun *id = pkt_id;
164*4882a593Smuzhiyun if (len)
165*4882a593Smuzhiyun *len = get_entry_len(entry);
166*4882a593Smuzhiyun if (pkt)
167*4882a593Smuzhiyun *pkt = (void *)(desc + desc_mem_sz(pkt_id));
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun return ret;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
ilo_pkt_recv(struct ilo_hwinfo * hw,struct ccb * ccb)173*4882a593Smuzhiyun static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun char *fifobar = ccb->ccb_u3.recv_fifobar;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun return fifo_check_recv(hw, fifobar);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun
/* ring this channel's doorbell (write 1) to notify iLO of pending work */
static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}
184*4882a593Smuzhiyun
/* write the doorbell "clear" code (2) for this channel */
static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}
189*4882a593Smuzhiyun
ctrl_set(int l2sz,int idxmask,int desclim)190*4882a593Smuzhiyun static inline int ctrl_set(int l2sz, int idxmask, int desclim)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun int active = 0, go = 1;
193*4882a593Smuzhiyun return l2sz << CTRL_BITPOS_L2SZ |
194*4882a593Smuzhiyun idxmask << CTRL_BITPOS_FIFOINDEXMASK |
195*4882a593Smuzhiyun desclim << CTRL_BITPOS_DESCLIMIT |
196*4882a593Smuzhiyun active << CTRL_BITPOS_A |
197*4882a593Smuzhiyun go << CTRL_BITPOS_G;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
ctrl_setup(struct ccb * ccb,int nr_desc,int l2desc_sz)200*4882a593Smuzhiyun static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun /* for simplicity, use the same parameters for send and recv ctrls */
203*4882a593Smuzhiyun ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
204*4882a593Smuzhiyun ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
fifo_sz(int nr_entry)207*4882a593Smuzhiyun static inline int fifo_sz(int nr_entry)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun /* size of a fifo is determined by the number of entries it contains */
210*4882a593Smuzhiyun return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
fifo_setup(void * base_addr,int nr_entry)213*4882a593Smuzhiyun static void fifo_setup(void *base_addr, int nr_entry)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun struct fifo *fifo_q = base_addr;
216*4882a593Smuzhiyun int i;
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun /* set up an empty fifo */
219*4882a593Smuzhiyun fifo_q->head = 0;
220*4882a593Smuzhiyun fifo_q->tail = 0;
221*4882a593Smuzhiyun fifo_q->reset = 0;
222*4882a593Smuzhiyun fifo_q->nrents = nr_entry;
223*4882a593Smuzhiyun fifo_q->imask = nr_entry - 1;
224*4882a593Smuzhiyun fifo_q->merge = ENTRY_MASK_O;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun for (i = 0; i < nr_entry; i++)
227*4882a593Smuzhiyun fifo_q->fifobar[i] = 0;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
/*
 * Stop a channel on the device and free its queue memory.
 * Clears the Go bits in the device's copy of the send/recv control
 * words, then rings the doorbell repeatedly (up to MAX_WAIT tries,
 * WAIT_TIME apart) until the Active bits drop, before wiping the
 * device ccb and releasing the DMA region.
 */
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
			  data->dma_pa);
}
262*4882a593Smuzhiyun
/*
 * Allocate and lay out the DMA-coherent memory backing one channel:
 *   [send fifo][recv fifo (cache aligned)][send desc area][recv desc area]
 * Two ccb views are filled in: driver_ccb holds kernel virtual
 * addresses, ilo_ccb holds the bus addresses the device will use.
 * Returns 0 on success or -ENOMEM.
 */
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	/* two fifos + two descriptor areas + slack for both alignments */
	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
					  &data->dma_pa, GFP_ATOMIC);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	/* align the start of the region before carving it up */
	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	/* send fifo: handle header followed by the entry array */
	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	/* recv fifo starts on a cache-line boundary */
	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	/* packet payload areas, one buffer per queue entry, per direction */
	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	/* each slot has its own doorbell region within the doorbell BAR */
	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}
325*4882a593Smuzhiyun
/*
 * Publish a prepared ccb to the device and prime its queues: copy the
 * bus-address ccb into the device's shared RAM, queue every packet id
 * on the send fifo (zero length) and on the recv fifo (full buffer
 * size) so the hardware has buffers to work with.
 */
static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
		(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}
350*4882a593Smuzhiyun
/*
 * Confirm iLO accepted the new channel: poll (up to MAX_WAIT tries,
 * WAIT_TIME apart) for the device to hand back one of the send
 * packets queued by ilo_ccb_open(), then re-queue it and ring the
 * doorbell.  Returns 0 on success, -EBUSY if the device never
 * responds.
 */
static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	/* give the dequeued packet back to the device */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}
372*4882a593Smuzhiyun
is_channel_reset(struct ccb * ccb)373*4882a593Smuzhiyun static inline int is_channel_reset(struct ccb *ccb)
374*4882a593Smuzhiyun {
375*4882a593Smuzhiyun /* check for this particular channel needing a reset */
376*4882a593Smuzhiyun return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun
set_channel_reset(struct ccb * ccb)379*4882a593Smuzhiyun static inline void set_channel_reset(struct ccb *ccb)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun /* set a flag indicating this channel needs a reset */
382*4882a593Smuzhiyun FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun
/* read the outbound doorbell register: per-channel pending bits plus
 * the reset indication bit */
static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}
389*4882a593Smuzhiyun
is_db_reset(int db_out)390*4882a593Smuzhiyun static inline int is_db_reset(int db_out)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun return db_out & (1 << DB_RESET);
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun
/* check for the global reset condition reported in the outbound doorbell */
static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	int db_out = get_device_outbound(hw);

	return is_db_reset(db_out);
}
400*4882a593Smuzhiyun
/* acknowledge (clear) the given bits in the outbound doorbell register */
static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}
405*4882a593Smuzhiyun
/* clear the device (reset bits, pending channel entries) by acking
 * every outbound doorbell bit at once */
static inline void clear_device(struct ilo_hwinfo *hw)
{
	clear_pending_db(hw, -1);
}
411*4882a593Smuzhiyun
/* read-modify-write: set bit 0 of the doorbell IRQ register to enable
 * device interrupts */
static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}
416*4882a593Smuzhiyun
/* read-modify-write: clear bit 0 of the doorbell IRQ register to
 * disable device interrupts */
static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}
422*4882a593Smuzhiyun
ilo_set_reset(struct ilo_hwinfo * hw)423*4882a593Smuzhiyun static void ilo_set_reset(struct ilo_hwinfo *hw)
424*4882a593Smuzhiyun {
425*4882a593Smuzhiyun int slot;
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun /*
428*4882a593Smuzhiyun * Mapped memory is zeroed on ilo reset, so set a per ccb flag
429*4882a593Smuzhiyun * to indicate that this ccb needs to be closed and reopened.
430*4882a593Smuzhiyun */
431*4882a593Smuzhiyun for (slot = 0; slot < max_ccb; slot++) {
432*4882a593Smuzhiyun if (!hw->ccb_alloc[slot])
433*4882a593Smuzhiyun continue;
434*4882a593Smuzhiyun set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun
/*
 * File read: hand the oldest received packet to userspace.
 * Returns -ENODEV if the channel saw a device reset (caller must
 * close and reopen), -EAGAIN if no packet arrives within the retry
 * window (20 x 100ms), -EFAULT if the user copy fails, otherwise the
 * number of bytes copied (capped at the received packet length).
 */
static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}
487*4882a593Smuzhiyun
/*
 * File write: send one packet of user data to the device.
 * Returns -ENODEV after a device reset, -EBUSY if no free send packet
 * is available, -EFAULT if the user copy fails (an empty packet is
 * still sent to return the buffer to the device), otherwise the
 * number of bytes sent (capped at the packet buffer length).
 */
static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}
519*4882a593Smuzhiyun
ilo_poll(struct file * fp,poll_table * wait)520*4882a593Smuzhiyun static __poll_t ilo_poll(struct file *fp, poll_table *wait)
521*4882a593Smuzhiyun {
522*4882a593Smuzhiyun struct ccb_data *data = fp->private_data;
523*4882a593Smuzhiyun struct ccb *driver_ccb = &data->driver_ccb;
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun poll_wait(fp, &data->ccb_waitq, wait);
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun if (is_channel_reset(driver_ccb))
528*4882a593Smuzhiyun return EPOLLERR;
529*4882a593Smuzhiyun else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
530*4882a593Smuzhiyun return EPOLLIN | EPOLLRDNORM;
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun return 0;
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun
/*
 * File release: drop one reference on this slot's shared ccb.  The
 * last closer detaches the ccb from the isr (ccb_alloc[] is cleared
 * under alloc_lock so the interrupt handler stops touching it), stops
 * the channel on the device and frees its memory.  open_lock
 * serializes against concurrent ilo_open()/ilo_close().
 */
static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	/* minors map onto channel slots modulo max_ccb */
	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->open_lock);

	return 0;
}
565*4882a593Smuzhiyun
/*
 * File open: attach to the channel slot for this minor.
 * The first opener allocates a ccb, publishes it to the device and
 * verifies the channel responds; later openers share the existing ccb
 * by bumping ccb_cnt.  If either this open or a previous one used
 * O_EXCL, sharing is refused with -EBUSY.  open_lock serializes
 * open/close; alloc_lock guards ccb_alloc[] against the isr.
 */
static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation (done before taking open_lock; freed below
	 * if the slot turns out to be already populated) */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {
			/* unwind: hide the ccb from the isr, then tear down */
			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}
639*4882a593Smuzhiyun
/* character-device entry points; one minor per (device, channel) pair */
static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open 		= ilo_open,
	.release 	= ilo_close,
	.llseek		= noop_llseek,
};
649*4882a593Smuzhiyun
/*
 * Interrupt handler: wake the waitqueue of every allocated channel
 * whose bit is set in the outbound doorbell, then acknowledge those
 * bits.  A reset indication flags all channels for reopen and wakes
 * everyone (pending forced to all-ones).  alloc_lock protects
 * ccb_alloc[] against concurrent open/close.
 */
static irqreturn_t ilo_isr(int irq, void *data)
{
	struct ilo_hwinfo *hw = data;
	int pending, i;

	spin_lock(&hw->alloc_lock);

	/* check for ccbs which have data */
	pending = get_device_outbound(hw);
	if (!pending) {
		/* not our interrupt (shared line) */
		spin_unlock(&hw->alloc_lock);
		return IRQ_NONE;
	}

	if (is_db_reset(pending)) {
		/* wake up all ccbs if the device was reset */
		pending = -1;
		ilo_set_reset(hw);
	}

	for (i = 0; i < max_ccb; i++) {
		if (!hw->ccb_alloc[i])
			continue;
		if (pending & (1 << i))
			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
	}

	/* clear the device of the channels that have been handled */
	clear_pending_db(hw, pending);

	spin_unlock(&hw->alloc_lock);

	return IRQ_HANDLED;
}
684*4882a593Smuzhiyun
/* Undo ilo_map_device(): release the doorbell, shared-mem and mmio maps. */
static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}
691*4882a593Smuzhiyun
ilo_map_device(struct pci_dev * pdev,struct ilo_hwinfo * hw)692*4882a593Smuzhiyun static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
693*4882a593Smuzhiyun {
694*4882a593Smuzhiyun int bar;
695*4882a593Smuzhiyun unsigned long off;
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun /* map the memory mapped i/o registers */
698*4882a593Smuzhiyun hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
699*4882a593Smuzhiyun if (hw->mmio_vaddr == NULL) {
700*4882a593Smuzhiyun dev_err(&pdev->dev, "Error mapping mmio\n");
701*4882a593Smuzhiyun goto out;
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun
704*4882a593Smuzhiyun /* map the adapter shared memory region */
705*4882a593Smuzhiyun if (pdev->subsystem_device == 0x00E4) {
706*4882a593Smuzhiyun bar = 5;
707*4882a593Smuzhiyun /* Last 8k is reserved for CCBs */
708*4882a593Smuzhiyun off = pci_resource_len(pdev, bar) - 0x2000;
709*4882a593Smuzhiyun } else {
710*4882a593Smuzhiyun bar = 2;
711*4882a593Smuzhiyun off = 0;
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
714*4882a593Smuzhiyun if (hw->ram_vaddr == NULL) {
715*4882a593Smuzhiyun dev_err(&pdev->dev, "Error mapping shared mem\n");
716*4882a593Smuzhiyun goto mmio_free;
717*4882a593Smuzhiyun }
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun /* map the doorbell aperture */
720*4882a593Smuzhiyun hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
721*4882a593Smuzhiyun if (hw->db_vaddr == NULL) {
722*4882a593Smuzhiyun dev_err(&pdev->dev, "Error mapping doorbell\n");
723*4882a593Smuzhiyun goto ram_free;
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun return 0;
727*4882a593Smuzhiyun ram_free:
728*4882a593Smuzhiyun pci_iounmap(pdev, hw->ram_vaddr);
729*4882a593Smuzhiyun mmio_free:
730*4882a593Smuzhiyun pci_iounmap(pdev, hw->mmio_vaddr);
731*4882a593Smuzhiyun out:
732*4882a593Smuzhiyun return -ENOMEM;
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun
ilo_remove(struct pci_dev * pdev)735*4882a593Smuzhiyun static void ilo_remove(struct pci_dev *pdev)
736*4882a593Smuzhiyun {
737*4882a593Smuzhiyun int i, minor;
738*4882a593Smuzhiyun struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun if (!ilo_hw)
741*4882a593Smuzhiyun return;
742*4882a593Smuzhiyun
743*4882a593Smuzhiyun clear_device(ilo_hw);
744*4882a593Smuzhiyun
745*4882a593Smuzhiyun minor = MINOR(ilo_hw->cdev.dev);
746*4882a593Smuzhiyun for (i = minor; i < minor + max_ccb; i++)
747*4882a593Smuzhiyun device_destroy(ilo_class, MKDEV(ilo_major, i));
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun cdev_del(&ilo_hw->cdev);
750*4882a593Smuzhiyun ilo_disable_interrupts(ilo_hw);
751*4882a593Smuzhiyun free_irq(pdev->irq, ilo_hw);
752*4882a593Smuzhiyun ilo_unmap_device(pdev, ilo_hw);
753*4882a593Smuzhiyun pci_release_regions(pdev);
754*4882a593Smuzhiyun /*
755*4882a593Smuzhiyun * pci_disable_device(pdev) used to be here. But this PCI device has
756*4882a593Smuzhiyun * two functions with interrupt lines connected to a single pin. The
757*4882a593Smuzhiyun * other one is a USB host controller. So when we disable the PIN here
758*4882a593Smuzhiyun * e.g. by rmmod hpilo, the controller stops working. It is because
759*4882a593Smuzhiyun * the interrupt link is disabled in ACPI since it is not refcounted
760*4882a593Smuzhiyun * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
761*4882a593Smuzhiyun */
762*4882a593Smuzhiyun kfree(ilo_hw);
763*4882a593Smuzhiyun ilo_hwdev[(minor / max_ccb)] = 0;
764*4882a593Smuzhiyun }
765*4882a593Smuzhiyun
ilo_probe(struct pci_dev * pdev,const struct pci_device_id * ent)766*4882a593Smuzhiyun static int ilo_probe(struct pci_dev *pdev,
767*4882a593Smuzhiyun const struct pci_device_id *ent)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun int devnum, minor, start, error = 0;
770*4882a593Smuzhiyun struct ilo_hwinfo *ilo_hw;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun if (pci_match_id(ilo_blacklist, pdev)) {
773*4882a593Smuzhiyun dev_dbg(&pdev->dev, "Not supported on this device\n");
774*4882a593Smuzhiyun return -ENODEV;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun if (max_ccb > MAX_CCB)
778*4882a593Smuzhiyun max_ccb = MAX_CCB;
779*4882a593Smuzhiyun else if (max_ccb < MIN_CCB)
780*4882a593Smuzhiyun max_ccb = MIN_CCB;
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun /* find a free range for device files */
783*4882a593Smuzhiyun for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
784*4882a593Smuzhiyun if (ilo_hwdev[devnum] == 0) {
785*4882a593Smuzhiyun ilo_hwdev[devnum] = 1;
786*4882a593Smuzhiyun break;
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun if (devnum == MAX_ILO_DEV) {
791*4882a593Smuzhiyun dev_err(&pdev->dev, "Error finding free device\n");
792*4882a593Smuzhiyun return -ENODEV;
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun /* track global allocations for this device */
796*4882a593Smuzhiyun error = -ENOMEM;
797*4882a593Smuzhiyun ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
798*4882a593Smuzhiyun if (!ilo_hw)
799*4882a593Smuzhiyun goto out;
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun ilo_hw->ilo_dev = pdev;
802*4882a593Smuzhiyun spin_lock_init(&ilo_hw->alloc_lock);
803*4882a593Smuzhiyun spin_lock_init(&ilo_hw->fifo_lock);
804*4882a593Smuzhiyun spin_lock_init(&ilo_hw->open_lock);
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun error = pci_enable_device(pdev);
807*4882a593Smuzhiyun if (error)
808*4882a593Smuzhiyun goto free;
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun pci_set_master(pdev);
811*4882a593Smuzhiyun
812*4882a593Smuzhiyun error = pci_request_regions(pdev, ILO_NAME);
813*4882a593Smuzhiyun if (error)
814*4882a593Smuzhiyun goto disable;
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun error = ilo_map_device(pdev, ilo_hw);
817*4882a593Smuzhiyun if (error)
818*4882a593Smuzhiyun goto free_regions;
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun pci_set_drvdata(pdev, ilo_hw);
821*4882a593Smuzhiyun clear_device(ilo_hw);
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
824*4882a593Smuzhiyun if (error)
825*4882a593Smuzhiyun goto unmap;
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun ilo_enable_interrupts(ilo_hw);
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun cdev_init(&ilo_hw->cdev, &ilo_fops);
830*4882a593Smuzhiyun ilo_hw->cdev.owner = THIS_MODULE;
831*4882a593Smuzhiyun start = devnum * max_ccb;
832*4882a593Smuzhiyun error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
833*4882a593Smuzhiyun if (error) {
834*4882a593Smuzhiyun dev_err(&pdev->dev, "Could not add cdev\n");
835*4882a593Smuzhiyun goto remove_isr;
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun for (minor = 0 ; minor < max_ccb; minor++) {
839*4882a593Smuzhiyun struct device *dev;
840*4882a593Smuzhiyun dev = device_create(ilo_class, &pdev->dev,
841*4882a593Smuzhiyun MKDEV(ilo_major, minor), NULL,
842*4882a593Smuzhiyun "hpilo!d%dccb%d", devnum, minor);
843*4882a593Smuzhiyun if (IS_ERR(dev))
844*4882a593Smuzhiyun dev_err(&pdev->dev, "Could not create files\n");
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun return 0;
848*4882a593Smuzhiyun remove_isr:
849*4882a593Smuzhiyun ilo_disable_interrupts(ilo_hw);
850*4882a593Smuzhiyun free_irq(pdev->irq, ilo_hw);
851*4882a593Smuzhiyun unmap:
852*4882a593Smuzhiyun ilo_unmap_device(pdev, ilo_hw);
853*4882a593Smuzhiyun free_regions:
854*4882a593Smuzhiyun pci_release_regions(pdev);
855*4882a593Smuzhiyun disable:
856*4882a593Smuzhiyun /* pci_disable_device(pdev); see comment in ilo_remove */
857*4882a593Smuzhiyun free:
858*4882a593Smuzhiyun kfree(ilo_hw);
859*4882a593Smuzhiyun out:
860*4882a593Smuzhiyun ilo_hwdev[devnum] = 0;
861*4882a593Smuzhiyun return error;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun
/*
 * PCI IDs this driver binds to; unsupported subsystems are rejected in
 * ilo_probe() via ilo_blacklist.
 */
static const struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);
870*4882a593Smuzhiyun
/* PCI driver glue; registered from ilo_init() */
static struct pci_driver ilo_driver = {
	.name = ILO_NAME,
	.id_table = ilo_devices,
	.probe = ilo_probe,
	.remove = ilo_remove,
};
877*4882a593Smuzhiyun
ilo_init(void)878*4882a593Smuzhiyun static int __init ilo_init(void)
879*4882a593Smuzhiyun {
880*4882a593Smuzhiyun int error;
881*4882a593Smuzhiyun dev_t dev;
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun ilo_class = class_create(THIS_MODULE, "iLO");
884*4882a593Smuzhiyun if (IS_ERR(ilo_class)) {
885*4882a593Smuzhiyun error = PTR_ERR(ilo_class);
886*4882a593Smuzhiyun goto out;
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
890*4882a593Smuzhiyun if (error)
891*4882a593Smuzhiyun goto class_destroy;
892*4882a593Smuzhiyun
893*4882a593Smuzhiyun ilo_major = MAJOR(dev);
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun error = pci_register_driver(&ilo_driver);
896*4882a593Smuzhiyun if (error)
897*4882a593Smuzhiyun goto chr_remove;
898*4882a593Smuzhiyun
899*4882a593Smuzhiyun return 0;
900*4882a593Smuzhiyun chr_remove:
901*4882a593Smuzhiyun unregister_chrdev_region(dev, MAX_OPEN);
902*4882a593Smuzhiyun class_destroy:
903*4882a593Smuzhiyun class_destroy(ilo_class);
904*4882a593Smuzhiyun out:
905*4882a593Smuzhiyun return error;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun
static void __exit ilo_exit(void)
{
	/* unwind ilo_init() in reverse order */
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}
914*4882a593Smuzhiyun
MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
MODULE_LICENSE("GPL v2");

/* read-only (0444); clamped to [MIN_CCB, MAX_CCB] in ilo_probe() */
module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");

module_init(ilo_init);
module_exit(ilo_exit);
926