xref: /OK3568_Linux_fs/kernel/drivers/xen/xen-pciback/conf_space.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Functions for creating a virtual configuration space for
 *               exported PCI Devices.
 *               It's dangerous to allow PCI Driver Domains to change their
 *               device's resources (memory, i/o ports, interrupts). We need to
 *               restrict changes to certain PCI Configuration registers:
 *               BARs, INTERRUPT_PIN, most registers in the header...
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define dev_fmt(fmt) DRV_NAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

bool xen_pcibk_permissive;
module_param_named(permissive, xen_pcibk_permissive, bool, 0644);

/* This is where the xen_pcibk_{read,write}_config_{byte,word,dword}
 * helpers below are created. */
#define DEFINE_PCI_CONFIG(op, size, type)			\
int xen_pcibk_##op##_config_##size				\
(struct pci_dev *dev, int offset, type value, void *data)	\
{								\
	return pci_##op##_config_##size(dev, offset, value);	\
}

DEFINE_PCI_CONFIG(read, byte, u8 *)
DEFINE_PCI_CONFIG(read, word, u16 *)
DEFINE_PCI_CONFIG(read, dword, u32 *)

DEFINE_PCI_CONFIG(write, byte, u8)
DEFINE_PCI_CONFIG(write, word, u16)
DEFINE_PCI_CONFIG(write, dword, u32)
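
/*
 * For illustration only (a hand expansion, not extra build code):
 * DEFINE_PCI_CONFIG(read, byte, u8 *) above produces a thin wrapper
 * around the matching PCI accessor:
 *
 *	int xen_pcibk_read_config_byte
 *	(struct pci_dev *dev, int offset, u8 *value, void *data)
 *	{
 *		return pci_read_config_byte(dev, offset, value);
 *	}
 *
 * The unused "data" argument is there so the wrappers fit the same
 * handler signature as the per-field read/write callbacks used below.
 */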

static int conf_space_read(struct pci_dev *dev,
			   const struct config_field_entry *entry,
			   int offset, u32 *value)
{
	int ret = 0;
	const struct config_field *field = entry->field;

	*value = 0;

	switch (field->size) {
	case 1:
		if (field->u.b.read)
			ret = field->u.b.read(dev, offset, (u8 *) value,
					      entry->data);
		break;
	case 2:
		if (field->u.w.read)
			ret = field->u.w.read(dev, offset, (u16 *) value,
					      entry->data);
		break;
	case 4:
		if (field->u.dw.read)
			ret = field->u.dw.read(dev, offset, value, entry->data);
		break;
	}
	return ret;
}

static int conf_space_write(struct pci_dev *dev,
			    const struct config_field_entry *entry,
			    int offset, u32 value)
{
	int ret = 0;
	const struct config_field *field = entry->field;

	switch (field->size) {
	case 1:
		if (field->u.b.write)
			ret = field->u.b.write(dev, offset, (u8) value,
					       entry->data);
		break;
	case 2:
		if (field->u.w.write)
			ret = field->u.w.write(dev, offset, (u16) value,
					       entry->data);
		break;
	case 4:
		if (field->u.dw.write)
			ret = field->u.dw.write(dev, offset, value,
						entry->data);
		break;
	}
	return ret;
}

static inline u32 get_mask(int size)
{
	if (size == 1)
		return 0xff;
	else if (size == 2)
		return 0xffff;
	else
		return 0xffffffff;
}

static inline int valid_request(int offset, int size)
{
	/* Validate request (no un-aligned requests) */
	if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
		return 1;
	return 0;
}

static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
			      int offset)
{
	if (offset >= 0) {
		new_val_mask <<= (offset * 8);
		new_val <<= (offset * 8);
	} else {
		new_val_mask >>= (offset * -8);
		new_val >>= (offset * -8);
	}
	val = (val & ~new_val_mask) | (new_val & new_val_mask);

	return val;
}
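
/*
 * Worked example (informational only): a guest writes the 16-bit value
 * 0xabcd at config offset 0x06 while a 32-bit virtual field starts at
 * offset 0x04.  The write path calls
 * merge_value(tmp_val, 0xabcd, get_mask(2), 0x06 - 0x04), so:
 *	new_val_mask = 0xffff << 16 = 0xffff0000
 *	new_val      = 0xabcd << 16 = 0xabcd0000
 *	result       = (tmp_val & 0x0000ffff) | 0xabcd0000
 * Only the two bytes covered by the request change.  A negative offset
 * (field starts before the request) shifts right instead, landing the
 * relevant part of the field in the low bytes.
 */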

static int xen_pcibios_err_to_errno(int err)
{
	switch (err) {
	case PCIBIOS_SUCCESSFUL:
		return XEN_PCI_ERR_success;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return XEN_PCI_ERR_dev_not_found;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return XEN_PCI_ERR_invalid_offset;
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return XEN_PCI_ERR_not_implemented;
	case PCIBIOS_SET_FAILED:
		return XEN_PCI_ERR_access_denied;
	}
	return err;
}

int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
			  u32 *ret_val)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;
	int field_start, field_end;
	/* if read fails for any reason, return 0
	 * (as if device didn't respond) */
	u32 value = 0, tmp_val;

	dev_dbg(&dev->dev, "read %d bytes at 0x%x\n", size, offset);

	if (!valid_request(offset, size)) {
		err = XEN_PCI_ERR_invalid_offset;
		goto out;
	}

	/* Get the real value first, then modify as appropriate */
	switch (size) {
	case 1:
		err = pci_read_config_byte(dev, offset, (u8 *) &value);
		break;
	case 2:
		err = pci_read_config_word(dev, offset, (u16 *) &value);
		break;
	case 4:
		err = pci_read_config_dword(dev, offset, &value);
		break;
	}

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		field_start = OFFSET(cfg_entry);
		field_end = OFFSET(cfg_entry) + field->size;

		if (offset + size > field_start && field_end > offset) {
			err = conf_space_read(dev, cfg_entry, field_start,
					      &tmp_val);
			if (err)
				goto out;

			value = merge_value(value, tmp_val,
					    get_mask(field->size),
					    field_start - offset);
		}
	}

out:
	dev_dbg(&dev->dev, "read %d bytes at 0x%x = %x\n", size, offset, value);

	*ret_val = value;
	return xen_pcibios_err_to_errno(err);
}
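
/*
 * Example of the read path above (informational only): assume a 32-bit
 * virtual field is registered at offset 0x10 (e.g. a virtualized BAR)
 * and the frontend issues a 4-byte read at 0x10.  The hardware value is
 * read first, the overlap test (0x10 + 4 > 0x10 && 0x14 > 0x10) matches,
 * conf_space_read() fetches the virtual value, and merge_value() with
 * mask 0xffffffff and offset 0 replaces the hardware value entirely
 * before it is returned in *ret_val.
 */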

int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
{
	int err = 0, handled = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;
	u32 tmp_val;
	int field_start, field_end;

	dev_dbg(&dev->dev, "write request %d bytes at 0x%x = %x\n",
		size, offset, value);

	if (!valid_request(offset, size))
		return XEN_PCI_ERR_invalid_offset;

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		field_start = OFFSET(cfg_entry);
		field_end = OFFSET(cfg_entry) + field->size;

		if (offset + size > field_start && field_end > offset) {
			err = conf_space_read(dev, cfg_entry, field_start,
					      &tmp_val);
			if (err)
				break;

			tmp_val = merge_value(tmp_val, value, get_mask(size),
					      offset - field_start);

			err = conf_space_write(dev, cfg_entry, field_start,
					       tmp_val);

			/* handled is set true here, but not every byte
			 * may have been written! Properly detecting if
			 * every byte is handled is unnecessary as the
			 * flag is used to detect devices that need
			 * special helpers to work correctly.
			 */
			handled = 1;
		}
	}

	if (!handled && !err) {
		/* By default, anything not specifically handled above is
		 * read-only. The permissive flag changes this behavior so
		 * that anything not specifically handled above is writable.
		 * This means that some fields may still be read-only because
		 * they have entries in the config_field list that intercept
		 * the write and do nothing. */
		if (dev_data->permissive || xen_pcibk_permissive) {
			switch (size) {
			case 1:
				err = pci_write_config_byte(dev, offset,
							    (u8) value);
				break;
			case 2:
				err = pci_write_config_word(dev, offset,
							    (u16) value);
				break;
			case 4:
				err = pci_write_config_dword(dev, offset,
							     (u32) value);
				break;
			}
		} else if (!dev_data->warned_on_write) {
			dev_data->warned_on_write = 1;
			dev_warn(&dev->dev, "Driver tried to write to a "
				 "read-only configuration space field at offset"
				 " 0x%x, size %d. This may be harmless, but if "
				 "you have problems with your device:\n"
				 "1) see permissive attribute in sysfs\n"
				 "2) report problems to the xen-devel "
				 "mailing list along with details of your "
				 "device obtained from lspci.\n", offset, size);
		}
	}

	return xen_pcibios_err_to_errno(err);
}

int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
{
	int err;
	u16 val;
	int ret = 0;

	err = pci_read_config_word(dev, PCI_COMMAND, &val);
	if (err)
		return err;
	if (!(val & PCI_COMMAND_INTX_DISABLE))
		ret |= INTERRUPT_TYPE_INTX;

	/*
	 * Do not trust dev->msi(x)_enabled here, as enabling could have been
	 * done bypassing the pci_*msi* functions, e.g. by qemu.
	 */
	if (dev->msi_cap) {
		err = pci_read_config_word(dev,
				dev->msi_cap + PCI_MSI_FLAGS,
				&val);
		if (err)
			return err;
		if (val & PCI_MSI_FLAGS_ENABLE)
			ret |= INTERRUPT_TYPE_MSI;
	}
	if (dev->msix_cap) {
		err = pci_read_config_word(dev,
				dev->msix_cap + PCI_MSIX_FLAGS,
				&val);
		if (err)
			return err;
		if (val & PCI_MSIX_FLAGS_ENABLE)
			ret |= INTERRUPT_TYPE_MSIX;
	}
	return ret ?: INTERRUPT_TYPE_NONE;
}
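
/*
 * Example (informational only): a device with INTx not masked in
 * PCI_COMMAND and MSI enabled in its MSI capability yields
 * INTERRUPT_TYPE_INTX | INTERRUPT_TYPE_MSI; with no source enabled the
 * function returns INTERRUPT_TYPE_NONE.
 */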

void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry, *t;
	const struct config_field *field;

	dev_dbg(&dev->dev, "free-ing dynamically allocated virtual "
			   "configuration space fields\n");
	if (!dev_data)
		return;

	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		if (field->clean) {
			field->clean((struct config_field *)field);

			kfree(cfg_entry->data);

			list_del(&cfg_entry->list);
			kfree(cfg_entry);
		}

	}
}

void xen_pcibk_config_reset_dev(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;

	dev_dbg(&dev->dev, "resetting virtual configuration space\n");
	if (!dev_data)
		return;

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		if (field->reset)
			field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
	}
}

void xen_pcibk_config_free_dev(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry, *t;
	const struct config_field *field;

	dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
	if (!dev_data)
		return;

	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
		list_del(&cfg_entry->list);

		field = cfg_entry->field;

		if (field->release)
			field->release(dev, OFFSET(cfg_entry), cfg_entry->data);

		kfree(cfg_entry);
	}
}

int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
				    const struct config_field *field,
				    unsigned int base_offset)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry;
	void *tmp;

	cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
	if (!cfg_entry) {
		err = -ENOMEM;
		goto out;
	}

	cfg_entry->data = NULL;
	cfg_entry->field = field;
	cfg_entry->base_offset = base_offset;

	/* silently ignore duplicate fields */
	err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
	if (err)
		goto out;

	if (field->init) {
		tmp = field->init(dev, OFFSET(cfg_entry));

		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			goto out;
		}

		cfg_entry->data = tmp;
	}

	dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
		OFFSET(cfg_entry));
	list_add_tail(&cfg_entry->list, &dev_data->config_fields);

out:
	if (err)
		kfree(cfg_entry);

	return err;
}

/* This sets up the device's virtual configuration space to keep track of
 * certain registers (like the base address registers (BARs)) so that we
 * can keep the client from manipulating them directly.
 */
int xen_pcibk_config_init_dev(struct pci_dev *dev)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "initializing virtual configuration space\n");

	INIT_LIST_HEAD(&dev_data->config_fields);

	err = xen_pcibk_config_header_add_fields(dev);
	if (err)
		goto out;

	err = xen_pcibk_config_capability_add_fields(dev);
	if (err)
		goto out;

	err = xen_pcibk_config_quirks_init(dev);

out:
	return err;
}

int xen_pcibk_config_init(void)
{
	return xen_pcibk_config_capability_init();
}