// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");

enum virtio_mem_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_MB_STATE_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_MB_STATE_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online. */
	VIRTIO_MEM_MB_STATE_ONLINE,
	/* Partially plugged, fully added to Linux, online. */
	VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
	VIRTIO_MEM_MB_STATE_COUNT
};
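
/*
 * Illustrative sketch (not part of the driver) of the typical transitions
 * the code below performs on this state machine; failure paths such as a
 * failing add_memory() (-> PLUGGED) are omitted:
 *
 *	UNUSED --plug+add--> OFFLINE(_PARTIAL) <--offline/online--> ONLINE(_PARTIAL)
 *
 * virtio_mem_prepare_next_mb() creates blocks as UNUSED,
 * virtio_mem_mb_plug_and_add() moves them to OFFLINE(_PARTIAL), and the
 * memory notifier toggles between the offline and online variants.
 */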

struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The translated node id. NUMA_NO_NODE in case not specified. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The subblock size. */
	uint64_t subblock_size;
	/* The number of subblocks per memory block. */
	uint32_t nb_sb_per_mb;

	/* Id of the first memory block of this device. */
	unsigned long first_mb_id;
	/* Id of the last memory block of this device. */
	unsigned long last_mb_id;
	/* Id of the last usable memory block of this device. */
	unsigned long last_usable_mb_id;
	/* Id of the next memory block to prepare when needed. */
	unsigned long next_mb_id;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/* Summary of all memory block states. */
	unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD		10

	/*
	 * One byte state per memory block.
	 *
	 * Allocated via vmalloc(). When preparing new blocks, resized
	 * (alloc+copy+free) when needed (i.e., when the state for the next
	 * memory block would cross a page boundary).
	 *
	 * With 128MB memory blocks, we have states for 512GB of memory in one
	 * page.
	 */
	uint8_t *mb_state;

	/*
	 * $nb_sb_per_mb bits per memory block. Handled similarly to mb_state.
	 *
	 * With 4MB subblocks, we manage 128GB of memory in one page.
	 */
	unsigned long *sb_bitmap;

	/*
	 * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred that we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};

/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

	synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->subblock_size;
}
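
/*
 * Worked example (illustrative only, assuming 128MB memory blocks and 4MB
 * subblocks): for addr = 0x8c00000 (140MB),
 *
 *	virtio_mem_phys_to_mb_id()  -> 140MB / 128MB         = mb_id 1
 *	virtio_mem_mb_id_to_phys(1) -> 1 * 128MB             = 0x8000000
 *	virtio_mem_phys_to_sb_id()  -> (140MB - 128MB) / 4MB = sb_id 3
 */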

/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
				    enum virtio_mem_mb_state state)
{
	const unsigned long idx = mb_id - vm->first_mb_id;
	enum virtio_mem_mb_state old_state;

	old_state = vm->mb_state[idx];
	vm->mb_state[idx] = state;

	BUG_ON(vm->nb_mb_state[old_state] == 0);
	vm->nb_mb_state[old_state]--;
	vm->nb_mb_state[state]++;
}

/*
 * Get the state of a memory block.
 */
static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
							unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->first_mb_id;

	return vm->mb_state[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
	unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_mb_state;

	if (vm->mb_state && old_pages == new_pages)
		return 0;

	new_mb_state = vzalloc(new_pages * PAGE_SIZE);
	if (!new_mb_state)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->mb_state)
		memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
	vfree(vm->mb_state);
	vm->mb_state = new_mb_state;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}

#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
	for (_mb_id = _vm->first_mb_id; \
	     _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id++) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->next_mb_id - 1; \
	     _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id--) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
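
/*
 * Example usage (hypothetical, for illustration only) - visit every fully
 * plugged, offline memory block of a device:
 *
 *	unsigned long mb_id;
 *
 *	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_OFFLINE)
 *		virtio_mem_mb_remove(vm, mb_id);
 *
 * The _rev variant walks from the highest to the lowest block id. Note that
 * both macros skip the remaining blocks early once the nb_mb_state counter
 * for the requested state hits zero.
 */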

/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
					 unsigned long mb_id, int sb_id,
					 int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_set(vm->sb_bitmap, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_clear(vm->sb_bitmap, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	if (count == 1)
		return test_bit(bit, vm->sb_bitmap);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
	       bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
 * none.
 */
static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;

	return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
	       bit;
}
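
/*
 * Bitmap layout sketch (illustrative only): with nb_sb_per_mb = 32, the
 * plugged bits of memory block first_mb_id + 1 occupy bit positions 32..63,
 * and its subblock sb_id = 3 maps to bit 1 * 32 + 3 = 35. All helpers above
 * compute bit positions this way.
 */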

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_sb_bitmap, *old_sb_bitmap;

	if (vm->sb_bitmap && old_pages == new_pages)
		return 0;

	new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_sb_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	/* Only copy if there is an old bitmap (see the mb_state variant). */
	if (vm->sb_bitmap)
		memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);

	old_sb_bitmap = vm->sb_bitmap;
	vm->sb_bitmap = new_sb_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_sb_bitmap);
	return 0;
}

/*
 * Try to add a memory block to Linux. This will usually only fail
 * if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
	return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
					 vm->resource_name,
					 MEMHP_MERGE_RESOURCE);
}

/*
 * Try to remove a memory block from Linux. Will only fail if the memory block
 * is not offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
	return remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Try to offline and remove a memory block from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	int nid = vm->nid;

	if (nid == NUMA_NO_NODE)
		nid = memory_add_physaddr_to_nid(addr);

	dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
		mb_id);
	return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}

static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
				      unsigned long start, unsigned long size)
{
	unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
	unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
				memory_block_size_bytes();

	return start < dev_end && dev_start < start + size;
}

/*
 * Test if a virtio-mem device owns a memory block. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
}

static int virtio_mem_notify_going_online(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_notify_offline(struct virtio_mem *vm,
				      unsigned long mb_id)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_ONLINE:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE);
		break;
	default:
		BUG();
		break;
	}

	/*
	 * Trigger the workqueue, maybe we can now unplug memory. Also,
	 * when we offline and remove a memory block, this will re-trigger
	 * us immediately - which is often nice because the removal of
	 * the memory block (e.g., memmap) might have freed up memory
	 * on other memory blocks we manage.
	 */
	virtio_mem_retry(vm);
}

static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
{
	unsigned long nb_offline;

	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
		break;
	default:
		BUG();
		break;
	}
	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];

	/* see if we can add new blocks now that we onlined one block */
	if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
		virtio_mem_retry(vm);
}

static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	struct page *page;
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Drop our reference to the pages so the memory can get
		 * offlined and add the unplugged pages to the managed
		 * page counters (so offlining code can correctly subtract
		 * them again).
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(pfn + i);
			if (WARN_ON(!page_ref_dec_and_test(page)))
				dump_page(page, "unplugged page referenced");
		}
	}
}

static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Get the reference we dropped when going offline and
		 * subtract the unplugged pages from the managed page
		 * counters.
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		for (i = 0; i < nr_pages; i++)
			page_ref_inc(pfn_to_page(pfn + i));
	}
}

/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
	int rc = NOTIFY_OK;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	/*
	 * Memory is onlined/offlined in memory block granularity. We cannot
	 * cross virtio-mem device boundaries and memory block boundaries. Bail
	 * out if this ever changes.
	 */
	if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
			 !IS_ALIGNED(start, memory_block_size_bytes())))
		return NOTIFY_BAD;

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		virtio_mem_notify_going_offline(vm, mb_id);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		rc = virtio_mem_notify_going_online(vm, mb_id);
		break;
	case MEM_OFFLINE:
		virtio_mem_notify_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		virtio_mem_notify_online(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		virtio_mem_notify_cancel_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}
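
/*
 * Notifier flow sketch (illustrative only) for a successful transition:
 *
 *	MEM_GOING_OFFLINE -> hotplug_mutex locked, unplugged pages released
 *	MEM_OFFLINE       -> state updated, hotplug_mutex unlocked
 *
 * and analogously for MEM_GOING_ONLINE/MEM_ONLINE. The MEM_CANCEL_* events
 * undo the GOING_* step when the operation is aborted in between.
 */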

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}

/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
{
	const int order = MAX_ORDER - 1;
	int i;

	/*
	 * We are always called with subblock granularity, which is at least
	 * aligned to MAX_ORDER - 1.
	 */
	for (i = 0; i < nr_pages; i += 1 << order) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      false);
			generic_online_page(page, order);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      true);
			free_contig_range(pfn + i, 1 << order);
			adjust_managed_page_count(page, 1 << order);
		}
	}
}
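
/*
 * Sizing example (illustrative only, assuming 4KB pages and MAX_ORDER = 11,
 * as on a typical x86-64 config): order = 10, so the loop above hands pages
 * back to the buddy in (1 << 10) * 4KB = 4MB chunks - exactly one
 * minimum-sized subblock per iteration.
 */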

static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	struct virtio_mem *vm;
	int sb_id;

	/*
	 * We exploit here that subblocks have at least MAX_ORDER - 1
	 * size/alignment and that this callback is called with such a
	 * size/alignment. So we cannot cross subblocks and therefore
	 * also not memory blocks.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_owned_mb(vm, mb_id))
			continue;

		sb_id = virtio_mem_phys_to_sb_id(vm, addr);
		/*
		 * If plugged, online the pages, otherwise, set them fake
		 * offline (PageOffline).
		 */
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* Not virtio-mem memory, but e.g., a DIMM. Online it. */
	generic_online_page(page, order);
}

static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}

static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		return -EAGAIN;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	default:
		return -ENOMEM;
	}
}
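
/*
 * Summary of the response-to-errno mapping used above (descriptive only):
 *
 *	VIRTIO_MEM_RESP_ACK   -> 0        (request succeeded)
 *	VIRTIO_MEM_RESP_NACK  -> -EAGAIN  (plug requests only; retry later)
 *	VIRTIO_MEM_RESP_BUSY  -> -ETXTBSY (device busy; retry later)
 *	VIRTIO_MEM_RESP_ERROR -> -EINVAL  (malformed request)
 *	anything else         -> -ENOMEM
 */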

/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				 int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
		sb_id, sb_id + count - 1);

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				   int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
		mb_id, sb_id, sb_id + count - 1);

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
				       unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->nb_sb_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->nb_sb_per_mb;

	return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
}
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun /*
1036*4882a593Smuzhiyun  * Prepare tracking data for the next memory block.
1037*4882a593Smuzhiyun  */
virtio_mem_prepare_next_mb(struct virtio_mem * vm,unsigned long * mb_id)1038*4882a593Smuzhiyun static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
1039*4882a593Smuzhiyun 				      unsigned long *mb_id)
1040*4882a593Smuzhiyun {
1041*4882a593Smuzhiyun 	int rc;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (vm->next_mb_id > vm->last_usable_mb_id)
1044*4882a593Smuzhiyun 		return -ENOSPC;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	/* Resize the state array if required. */
1047*4882a593Smuzhiyun 	rc = virtio_mem_mb_state_prepare_next_mb(vm);
1048*4882a593Smuzhiyun 	if (rc)
1049*4882a593Smuzhiyun 		return rc;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	/* Resize the subblock bitmap if required. */
1052*4882a593Smuzhiyun 	rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
1053*4882a593Smuzhiyun 	if (rc)
1054*4882a593Smuzhiyun 		return rc;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
1057*4882a593Smuzhiyun 	*mb_id = vm->next_mb_id++;
1058*4882a593Smuzhiyun 	return 0;
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun /*
1062*4882a593Smuzhiyun  * Don't add too many blocks that are not onlined yet to avoid running OOM.
1063*4882a593Smuzhiyun  */
virtio_mem_too_many_mb_offline(struct virtio_mem * vm)1064*4882a593Smuzhiyun static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun 	unsigned long nb_offline;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
1069*4882a593Smuzhiyun 		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
1070*4882a593Smuzhiyun 	return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
1071*4882a593Smuzhiyun }
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun /*
1074*4882a593Smuzhiyun  * Try to plug the desired number of subblocks and add the memory block
1075*4882a593Smuzhiyun  * to Linux.
1076*4882a593Smuzhiyun  *
1077*4882a593Smuzhiyun  * Will modify the state of the memory block.
1078*4882a593Smuzhiyun  */
virtio_mem_mb_plug_and_add(struct virtio_mem * vm,unsigned long mb_id,uint64_t * nb_sb)1079*4882a593Smuzhiyun static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
1080*4882a593Smuzhiyun 				      unsigned long mb_id,
1081*4882a593Smuzhiyun 				      uint64_t *nb_sb)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
1084*4882a593Smuzhiyun 	int rc, rc2;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!count))
1087*4882a593Smuzhiyun 		return -EINVAL;
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	/*
1090*4882a593Smuzhiyun 	 * Plug the requested number of subblocks before adding it to linux,
1091*4882a593Smuzhiyun 	 * so that onlining will directly online all plugged subblocks.
1092*4882a593Smuzhiyun 	 */
1093*4882a593Smuzhiyun 	rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
1094*4882a593Smuzhiyun 	if (rc)
1095*4882a593Smuzhiyun 		return rc;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	/*
1098*4882a593Smuzhiyun 	 * Mark the block properly offline before adding it to Linux,
1099*4882a593Smuzhiyun 	 * so the memory notifiers will find the block in the right state.
1100*4882a593Smuzhiyun 	 */
1101*4882a593Smuzhiyun 	if (count == vm->nb_sb_per_mb)
1102*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id,
1103*4882a593Smuzhiyun 					VIRTIO_MEM_MB_STATE_OFFLINE);
1104*4882a593Smuzhiyun 	else
1105*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id,
1106*4882a593Smuzhiyun 					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	/* Add the memory block to Linux - if that fails, try to unplug. */
1109*4882a593Smuzhiyun 	rc = virtio_mem_mb_add(vm, mb_id);
1110*4882a593Smuzhiyun 	if (rc) {
1111*4882a593Smuzhiyun 		enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 		dev_err(&vm->vdev->dev,
1114*4882a593Smuzhiyun 			"adding memory block %lu failed with %d\n", mb_id, rc);
1115*4882a593Smuzhiyun 		rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 		/*
1118*4882a593Smuzhiyun 		 * TODO: Linux MM does not properly clean up yet in all cases
1119*4882a593Smuzhiyun 		 * where adding of memory failed - especially on -ENOMEM.
1120*4882a593Smuzhiyun 		 */
1121*4882a593Smuzhiyun 		if (rc2)
1122*4882a593Smuzhiyun 			new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
1123*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id, new_state);
1124*4882a593Smuzhiyun 		return rc;
1125*4882a593Smuzhiyun 	}
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	*nb_sb -= count;
1128*4882a593Smuzhiyun 	return 0;
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /*
1132*4882a593Smuzhiyun  * Try to plug the desired number of subblocks of a memory block that
1133*4882a593Smuzhiyun  * is already added to Linux.
1134*4882a593Smuzhiyun  *
1135*4882a593Smuzhiyun  * Will modify the state of the memory block.
1136*4882a593Smuzhiyun  *
1137*4882a593Smuzhiyun  * Note: Can fail after some subblocks were successfully plugged.
1138*4882a593Smuzhiyun  */
1139*4882a593Smuzhiyun static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
1140*4882a593Smuzhiyun 				     uint64_t *nb_sb, bool online)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	unsigned long pfn, nr_pages;
1143*4882a593Smuzhiyun 	int sb_id, count;
1144*4882a593Smuzhiyun 	int rc;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!*nb_sb))
1147*4882a593Smuzhiyun 		return -EINVAL;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	while (*nb_sb) {
1150*4882a593Smuzhiyun 		sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
1151*4882a593Smuzhiyun 		if (sb_id >= vm->nb_sb_per_mb)
1152*4882a593Smuzhiyun 			break;
1153*4882a593Smuzhiyun 		count = 1;
1154*4882a593Smuzhiyun 		while (count < *nb_sb &&
1155*4882a593Smuzhiyun 		       sb_id + count < vm->nb_sb_per_mb &&
1156*4882a593Smuzhiyun 		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
1157*4882a593Smuzhiyun 						      1))
1158*4882a593Smuzhiyun 			count++;
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 		rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
1161*4882a593Smuzhiyun 		if (rc)
1162*4882a593Smuzhiyun 			return rc;
1163*4882a593Smuzhiyun 		*nb_sb -= count;
1164*4882a593Smuzhiyun 		if (!online)
1165*4882a593Smuzhiyun 			continue;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 		/* fake-online the pages if the memory block is online */
1168*4882a593Smuzhiyun 		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1169*4882a593Smuzhiyun 			       sb_id * vm->subblock_size);
1170*4882a593Smuzhiyun 		nr_pages = PFN_DOWN(count * vm->subblock_size);
1171*4882a593Smuzhiyun 		virtio_mem_fake_online(pfn, nr_pages);
1172*4882a593Smuzhiyun 	}
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1175*4882a593Smuzhiyun 		if (online)
1176*4882a593Smuzhiyun 			virtio_mem_mb_set_state(vm, mb_id,
1177*4882a593Smuzhiyun 						VIRTIO_MEM_MB_STATE_ONLINE);
1178*4882a593Smuzhiyun 		else
1179*4882a593Smuzhiyun 			virtio_mem_mb_set_state(vm, mb_id,
1180*4882a593Smuzhiyun 						VIRTIO_MEM_MB_STATE_OFFLINE);
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	return 0;
1184*4882a593Smuzhiyun }
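
/*
 * Worked example (hypothetical state): with 8 subblocks per memory block and
 * subblocks 0, 3 and 4 already plugged, the loop above plugs the run {1, 2}
 * in a single request and then the run {5, 6, 7}, as long as *nb_sb still
 * covers them; a block that ends up fully plugged is then moved to the
 * ONLINE/OFFLINE state.
 */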
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun /*
1187*4882a593Smuzhiyun  * Try to plug the requested amount of memory.
1188*4882a593Smuzhiyun  */
1189*4882a593Smuzhiyun static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	uint64_t nb_sb = diff / vm->subblock_size;
1192*4882a593Smuzhiyun 	unsigned long mb_id;
1193*4882a593Smuzhiyun 	int rc;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	if (!nb_sb)
1196*4882a593Smuzhiyun 		return 0;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	/* Don't race with onlining/offlining */
1199*4882a593Smuzhiyun 	mutex_lock(&vm->hotplug_mutex);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	/* Try to plug subblocks of partially plugged online blocks. */
1202*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state(vm, mb_id,
1203*4882a593Smuzhiyun 				     VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
1204*4882a593Smuzhiyun 		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
1205*4882a593Smuzhiyun 		if (rc || !nb_sb)
1206*4882a593Smuzhiyun 			goto out_unlock;
1207*4882a593Smuzhiyun 		cond_resched();
1208*4882a593Smuzhiyun 	}
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	/* Try to plug subblocks of partially plugged offline blocks. */
1211*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state(vm, mb_id,
1212*4882a593Smuzhiyun 				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
1213*4882a593Smuzhiyun 		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
1214*4882a593Smuzhiyun 		if (rc || !nb_sb)
1215*4882a593Smuzhiyun 			goto out_unlock;
1216*4882a593Smuzhiyun 		cond_resched();
1217*4882a593Smuzhiyun 	}
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/*
1220*4882a593Smuzhiyun 	 * We won't be working on online/offline memory blocks from this point,
1221*4882a593Smuzhiyun 	 * so we can't race with memory onlining/offlining. Drop the mutex.
1222*4882a593Smuzhiyun 	 */
1223*4882a593Smuzhiyun 	mutex_unlock(&vm->hotplug_mutex);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	/* Try to plug and add unused blocks */
1226*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
1227*4882a593Smuzhiyun 		if (virtio_mem_too_many_mb_offline(vm))
1228*4882a593Smuzhiyun 			return -ENOSPC;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
1231*4882a593Smuzhiyun 		if (rc || !nb_sb)
1232*4882a593Smuzhiyun 			return rc;
1233*4882a593Smuzhiyun 		cond_resched();
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	/* Try to prepare, plug and add new blocks */
1237*4882a593Smuzhiyun 	while (nb_sb) {
1238*4882a593Smuzhiyun 		if (virtio_mem_too_many_mb_offline(vm))
1239*4882a593Smuzhiyun 			return -ENOSPC;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 		rc = virtio_mem_prepare_next_mb(vm, &mb_id);
1242*4882a593Smuzhiyun 		if (rc)
1243*4882a593Smuzhiyun 			return rc;
1244*4882a593Smuzhiyun 		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
1245*4882a593Smuzhiyun 		if (rc)
1246*4882a593Smuzhiyun 			return rc;
1247*4882a593Smuzhiyun 		cond_resched();
1248*4882a593Smuzhiyun 	}
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	return 0;
1251*4882a593Smuzhiyun out_unlock:
1252*4882a593Smuzhiyun 	mutex_unlock(&vm->hotplug_mutex);
1253*4882a593Smuzhiyun 	return rc;
1254*4882a593Smuzhiyun }
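
/*
 * Worked example (hypothetical sizes): with a 4 MiB subblock size, a request
 * to grow by diff = 1 GiB yields nb_sb = diff / subblock_size = 256
 * subblocks; with 128 MiB memory blocks (32 subblocks each) that corresponds
 * to eight fully plugged memory blocks.
 */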
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun /*
1257*4882a593Smuzhiyun  * Unplug the desired number of plugged subblocks of an offline memory block.
1258*4882a593Smuzhiyun  * Will fail if any subblock cannot get unplugged (instead of skipping it).
1259*4882a593Smuzhiyun  *
1260*4882a593Smuzhiyun  * Will modify the state of the memory block. Might temporarily drop the
1261*4882a593Smuzhiyun  * hotplug_mutex.
1262*4882a593Smuzhiyun  *
1263*4882a593Smuzhiyun  * Note: Can fail after some subblocks were successfully unplugged.
1264*4882a593Smuzhiyun  */
1265*4882a593Smuzhiyun static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
1266*4882a593Smuzhiyun 					       unsigned long mb_id,
1267*4882a593Smuzhiyun 					       uint64_t *nb_sb)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	int rc;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	/* some subblocks might have been unplugged even on failure */
1274*4882a593Smuzhiyun 	if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
1275*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id,
1276*4882a593Smuzhiyun 					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
1277*4882a593Smuzhiyun 	if (rc)
1278*4882a593Smuzhiyun 		return rc;
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1281*4882a593Smuzhiyun 		/*
1282*4882a593Smuzhiyun 		 * Remove the block from Linux - this should never fail.
1283*4882a593Smuzhiyun 		 * Hinder the block from getting onlined by marking it
1284*4882a593Smuzhiyun 		 * Prevent the block from getting onlined by marking it
1285*4882a593Smuzhiyun 		 * any pending GOING_ONLINE requests can be serviced/rejected.
1286*4882a593Smuzhiyun 		 */
1287*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id,
1288*4882a593Smuzhiyun 					VIRTIO_MEM_MB_STATE_UNUSED);
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 		mutex_unlock(&vm->hotplug_mutex);
1291*4882a593Smuzhiyun 		rc = virtio_mem_mb_remove(vm, mb_id);
1292*4882a593Smuzhiyun 		BUG_ON(rc);
1293*4882a593Smuzhiyun 		mutex_lock(&vm->hotplug_mutex);
1294*4882a593Smuzhiyun 	}
1295*4882a593Smuzhiyun 	return 0;
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun /*
1299*4882a593Smuzhiyun  * Unplug the given plugged subblocks of an online memory block.
1300*4882a593Smuzhiyun  *
1301*4882a593Smuzhiyun  * Will modify the state of the memory block.
1302*4882a593Smuzhiyun  */
1303*4882a593Smuzhiyun static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
1304*4882a593Smuzhiyun 					  unsigned long mb_id, int sb_id,
1305*4882a593Smuzhiyun 					  int count)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
1308*4882a593Smuzhiyun 	unsigned long start_pfn;
1309*4882a593Smuzhiyun 	int rc;
1310*4882a593Smuzhiyun 	struct acr_info dummy;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1313*4882a593Smuzhiyun 			     sb_id * vm->subblock_size);
1314*4882a593Smuzhiyun 	rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
1315*4882a593Smuzhiyun 				MIGRATE_MOVABLE, GFP_KERNEL, &dummy);
1316*4882a593Smuzhiyun 	if (rc == -ENOMEM)
1317*4882a593Smuzhiyun 		/* whoops, out of memory */
1318*4882a593Smuzhiyun 		return rc;
1319*4882a593Smuzhiyun 	if (rc)
1320*4882a593Smuzhiyun 		return -EBUSY;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	/* Mark it as fake-offline before unplugging it */
1323*4882a593Smuzhiyun 	virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
1324*4882a593Smuzhiyun 	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	/* Try to unplug the allocated memory */
1327*4882a593Smuzhiyun 	rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
1328*4882a593Smuzhiyun 	if (rc) {
1329*4882a593Smuzhiyun 		/* Return the memory to the buddy. */
1330*4882a593Smuzhiyun 		virtio_mem_fake_online(start_pfn, nr_pages);
1331*4882a593Smuzhiyun 		return rc;
1332*4882a593Smuzhiyun 	}
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	virtio_mem_mb_set_state(vm, mb_id,
1335*4882a593Smuzhiyun 				VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
1336*4882a593Smuzhiyun 	return 0;
1337*4882a593Smuzhiyun }
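
/*
 * PFN math example (hypothetical values): for a memory block starting at
 * 0x100000000 with a 4 MiB subblock size and 4 KiB pages, sb_id = 2 and
 * count = 1 give start_pfn = (0x100000000 + 2 * 0x400000) >> 12 = 0x100800
 * and nr_pages = 0x400000 >> 12 = 1024.
 */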
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun /*
1340*4882a593Smuzhiyun  * Unplug the desired number of plugged subblocks of an online memory block.
1341*4882a593Smuzhiyun  * Will skip subblocks that are busy.
1342*4882a593Smuzhiyun  *
1343*4882a593Smuzhiyun  * Will modify the state of the memory block. Might temporarily drop the
1344*4882a593Smuzhiyun  * hotplug_mutex.
1345*4882a593Smuzhiyun  *
1346*4882a593Smuzhiyun  * Note: Can fail after some subblocks were successfully unplugged. Can
1347*4882a593Smuzhiyun  *       return 0 even if subblocks were busy and could not get unplugged.
1348*4882a593Smuzhiyun  */
1349*4882a593Smuzhiyun static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
1350*4882a593Smuzhiyun 					      unsigned long mb_id,
1351*4882a593Smuzhiyun 					      uint64_t *nb_sb)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun 	int rc, sb_id;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	/* If possible, try to unplug the complete block in one shot. */
1356*4882a593Smuzhiyun 	if (*nb_sb >= vm->nb_sb_per_mb &&
1357*4882a593Smuzhiyun 	    virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1358*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
1359*4882a593Smuzhiyun 						    vm->nb_sb_per_mb);
1360*4882a593Smuzhiyun 		if (!rc) {
1361*4882a593Smuzhiyun 			*nb_sb -= vm->nb_sb_per_mb;
1362*4882a593Smuzhiyun 			goto unplugged;
1363*4882a593Smuzhiyun 		} else if (rc != -EBUSY)
1364*4882a593Smuzhiyun 			return rc;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	/* Fallback to single subblocks. */
1368*4882a593Smuzhiyun 	for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
1369*4882a593Smuzhiyun 		/* Find the next candidate subblock */
1370*4882a593Smuzhiyun 		while (sb_id >= 0 &&
1371*4882a593Smuzhiyun 		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
1372*4882a593Smuzhiyun 			sb_id--;
1373*4882a593Smuzhiyun 		if (sb_id < 0)
1374*4882a593Smuzhiyun 			break;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
1377*4882a593Smuzhiyun 		if (rc == -EBUSY)
1378*4882a593Smuzhiyun 			continue;
1379*4882a593Smuzhiyun 		else if (rc)
1380*4882a593Smuzhiyun 			return rc;
1381*4882a593Smuzhiyun 		*nb_sb -= 1;
1382*4882a593Smuzhiyun 	}
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun unplugged:
1385*4882a593Smuzhiyun 	/*
1386*4882a593Smuzhiyun 	 * Once all subblocks of a memory block were unplugged, offline and
1387*4882a593Smuzhiyun 	 * remove it. This will usually not fail, as no memory is in use
1388*4882a593Smuzhiyun 	 * anymore - however some other notifiers might NACK the request.
1389*4882a593Smuzhiyun 	 */
1390*4882a593Smuzhiyun 	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
1391*4882a593Smuzhiyun 		mutex_unlock(&vm->hotplug_mutex);
1392*4882a593Smuzhiyun 		rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
1393*4882a593Smuzhiyun 		mutex_lock(&vm->hotplug_mutex);
1394*4882a593Smuzhiyun 		if (!rc)
1395*4882a593Smuzhiyun 			virtio_mem_mb_set_state(vm, mb_id,
1396*4882a593Smuzhiyun 						VIRTIO_MEM_MB_STATE_UNUSED);
1397*4882a593Smuzhiyun 	}
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	return 0;
1400*4882a593Smuzhiyun }
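
/*
 * Worked example (hypothetical state): with 8 subblocks per block and only
 * subblocks 1 and 6 still plugged, the fallback loop above tries sb 6 first
 * and then sb 1; an -EBUSY on one of them (the pages could not be isolated)
 * merely skips that subblock instead of failing the whole request.
 */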
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun /*
1403*4882a593Smuzhiyun  * Try to unplug the requested amount of memory.
1404*4882a593Smuzhiyun  */
1405*4882a593Smuzhiyun static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
1406*4882a593Smuzhiyun {
1407*4882a593Smuzhiyun 	uint64_t nb_sb = diff / vm->subblock_size;
1408*4882a593Smuzhiyun 	unsigned long mb_id;
1409*4882a593Smuzhiyun 	int rc;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	if (!nb_sb)
1412*4882a593Smuzhiyun 		return 0;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	/*
1415*4882a593Smuzhiyun 	 * We'll drop the mutex a couple of times when it is safe to do so.
1416*4882a593Smuzhiyun 	 * Some blocks might switch state (online/offline) in the meantime and
1417*4882a593Smuzhiyun 	 * be missed in this run - we will retry later.
1418*4882a593Smuzhiyun 	 */
1419*4882a593Smuzhiyun 	mutex_lock(&vm->hotplug_mutex);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	/* Try to unplug subblocks of partially plugged offline blocks. */
1422*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state_rev(vm, mb_id,
1423*4882a593Smuzhiyun 					 VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
1424*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
1425*4882a593Smuzhiyun 							 &nb_sb);
1426*4882a593Smuzhiyun 		if (rc || !nb_sb)
1427*4882a593Smuzhiyun 			goto out_unlock;
1428*4882a593Smuzhiyun 		cond_resched();
1429*4882a593Smuzhiyun 	}
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	/* Try to unplug subblocks of plugged offline blocks. */
1432*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state_rev(vm, mb_id,
1433*4882a593Smuzhiyun 					 VIRTIO_MEM_MB_STATE_OFFLINE) {
1434*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
1435*4882a593Smuzhiyun 							 &nb_sb);
1436*4882a593Smuzhiyun 		if (rc || !nb_sb)
1437*4882a593Smuzhiyun 			goto out_unlock;
1438*4882a593Smuzhiyun 		cond_resched();
1439*4882a593Smuzhiyun 	}
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	if (!unplug_online) {
1442*4882a593Smuzhiyun 		mutex_unlock(&vm->hotplug_mutex);
1443*4882a593Smuzhiyun 		return 0;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	/* Try to unplug subblocks of partially plugged online blocks. */
1447*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state_rev(vm, mb_id,
1448*4882a593Smuzhiyun 					 VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
1449*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
1450*4882a593Smuzhiyun 							&nb_sb);
1451*4882a593Smuzhiyun 		if (rc || !nb_sb)
1452*4882a593Smuzhiyun 			goto out_unlock;
1453*4882a593Smuzhiyun 		mutex_unlock(&vm->hotplug_mutex);
1454*4882a593Smuzhiyun 		cond_resched();
1455*4882a593Smuzhiyun 		mutex_lock(&vm->hotplug_mutex);
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	/* Try to unplug subblocks of plugged online blocks. */
1459*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state_rev(vm, mb_id,
1460*4882a593Smuzhiyun 					 VIRTIO_MEM_MB_STATE_ONLINE) {
1461*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
1462*4882a593Smuzhiyun 							&nb_sb);
1463*4882a593Smuzhiyun 		if (rc || !nb_sb)
1464*4882a593Smuzhiyun 			goto out_unlock;
1465*4882a593Smuzhiyun 		mutex_unlock(&vm->hotplug_mutex);
1466*4882a593Smuzhiyun 		cond_resched();
1467*4882a593Smuzhiyun 		mutex_lock(&vm->hotplug_mutex);
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	mutex_unlock(&vm->hotplug_mutex);
1471*4882a593Smuzhiyun 	return nb_sb ? -EBUSY : 0;
1472*4882a593Smuzhiyun out_unlock:
1473*4882a593Smuzhiyun 	mutex_unlock(&vm->hotplug_mutex);
1474*4882a593Smuzhiyun 	return rc;
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun /*
1478*4882a593Smuzhiyun  * Try to unplug all blocks that couldn't be unplugged before, for example,
1479*4882a593Smuzhiyun  * because the hypervisor was busy.
1480*4882a593Smuzhiyun  */
1481*4882a593Smuzhiyun static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun 	unsigned long mb_id;
1484*4882a593Smuzhiyun 	int rc;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
1487*4882a593Smuzhiyun 		rc = virtio_mem_mb_unplug(vm, mb_id);
1488*4882a593Smuzhiyun 		if (rc)
1489*4882a593Smuzhiyun 			return rc;
1490*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
1491*4882a593Smuzhiyun 	}
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	return 0;
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun /*
1497*4882a593Smuzhiyun  * Update all parts of the config that could have changed.
1498*4882a593Smuzhiyun  */
1499*4882a593Smuzhiyun static void virtio_mem_refresh_config(struct virtio_mem *vm)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun 	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
1502*4882a593Smuzhiyun 	uint64_t new_plugged_size, usable_region_size, end_addr;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	/* the plugged_size is just a reflection of what _we_ did previously */
1505*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
1506*4882a593Smuzhiyun 			&new_plugged_size);
1507*4882a593Smuzhiyun 	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
1508*4882a593Smuzhiyun 		vm->plugged_size = new_plugged_size;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	/* calculate the last usable memory block id */
1511*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config,
1512*4882a593Smuzhiyun 			usable_region_size, &usable_region_size);
1513*4882a593Smuzhiyun 	end_addr = vm->addr + usable_region_size;
1514*4882a593Smuzhiyun 	end_addr = min(end_addr, phys_limit);
1515*4882a593Smuzhiyun 	vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	/* see if there is a request to change the size */
1518*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
1519*4882a593Smuzhiyun 			&vm->requested_size);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
1522*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
1523*4882a593Smuzhiyun }
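
/*
 * Worked example (hypothetical values): with vm->addr = 0x100000000, a
 * usable_region_size of 1 GiB and 128 MiB memory blocks, end_addr becomes
 * 0x140000000 and last_usable_mb_id = 0x140000000 / 0x8000000 - 1 = 39,
 * i.e., memory blocks 32..39 of the address space are usable.
 */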
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun /*
1526*4882a593Smuzhiyun  * Workqueue function for handling plug/unplug requests and config updates.
1527*4882a593Smuzhiyun  */
1528*4882a593Smuzhiyun static void virtio_mem_run_wq(struct work_struct *work)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
1531*4882a593Smuzhiyun 	uint64_t diff;
1532*4882a593Smuzhiyun 	int rc;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	hrtimer_cancel(&vm->retry_timer);
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	if (vm->broken)
1537*4882a593Smuzhiyun 		return;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun retry:
1540*4882a593Smuzhiyun 	rc = 0;
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	/* Make sure we start with a clean state if there are leftovers. */
1543*4882a593Smuzhiyun 	if (unlikely(vm->unplug_all_required))
1544*4882a593Smuzhiyun 		rc = virtio_mem_send_unplug_all_request(vm);
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	if (atomic_read(&vm->config_changed)) {
1547*4882a593Smuzhiyun 		atomic_set(&vm->config_changed, 0);
1548*4882a593Smuzhiyun 		virtio_mem_refresh_config(vm);
1549*4882a593Smuzhiyun 	}
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	/* Unplug any leftovers from previous runs */
1552*4882a593Smuzhiyun 	if (!rc)
1553*4882a593Smuzhiyun 		rc = virtio_mem_unplug_pending_mb(vm);
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	if (!rc && vm->requested_size != vm->plugged_size) {
1556*4882a593Smuzhiyun 		if (vm->requested_size > vm->plugged_size) {
1557*4882a593Smuzhiyun 			diff = vm->requested_size - vm->plugged_size;
1558*4882a593Smuzhiyun 			rc = virtio_mem_plug_request(vm, diff);
1559*4882a593Smuzhiyun 		} else {
1560*4882a593Smuzhiyun 			diff = vm->plugged_size - vm->requested_size;
1561*4882a593Smuzhiyun 			rc = virtio_mem_unplug_request(vm, diff);
1562*4882a593Smuzhiyun 		}
1563*4882a593Smuzhiyun 	}
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	switch (rc) {
1566*4882a593Smuzhiyun 	case 0:
1567*4882a593Smuzhiyun 		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
1568*4882a593Smuzhiyun 		break;
1569*4882a593Smuzhiyun 	case -ENOSPC:
1570*4882a593Smuzhiyun 		/*
1571*4882a593Smuzhiyun 		 * We cannot add any more memory (alignment, physical limit)
1572*4882a593Smuzhiyun 		 * or we have too many offline memory blocks.
1573*4882a593Smuzhiyun 		 */
1574*4882a593Smuzhiyun 		break;
1575*4882a593Smuzhiyun 	case -ETXTBSY:
1576*4882a593Smuzhiyun 		/*
1577*4882a593Smuzhiyun 		 * The hypervisor cannot process our request right now
1578*4882a593Smuzhiyun 		 * (e.g., out of memory, migrating);
1579*4882a593Smuzhiyun 		 */
1580*4882a593Smuzhiyun 	case -EBUSY:
1581*4882a593Smuzhiyun 		/*
1582*4882a593Smuzhiyun 		 * We cannot free up any memory to unplug it (all plugged memory
1583*4882a593Smuzhiyun 		 * is busy).
1584*4882a593Smuzhiyun 		 */
1585*4882a593Smuzhiyun 	case -ENOMEM:
1586*4882a593Smuzhiyun 		/* Out of memory, try again later. */
1587*4882a593Smuzhiyun 		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
1588*4882a593Smuzhiyun 			      HRTIMER_MODE_REL);
1589*4882a593Smuzhiyun 		break;
1590*4882a593Smuzhiyun 	case -EAGAIN:
1591*4882a593Smuzhiyun 		/* Retry immediately (e.g., the config changed). */
1592*4882a593Smuzhiyun 		goto retry;
1593*4882a593Smuzhiyun 	default:
1594*4882a593Smuzhiyun 		/* Unknown error, mark as broken */
1595*4882a593Smuzhiyun 		dev_err(&vm->vdev->dev,
1596*4882a593Smuzhiyun 			"unknown error, marking device broken: %d\n", rc);
1597*4882a593Smuzhiyun 		vm->broken = true;
1598*4882a593Smuzhiyun 	}
1599*4882a593Smuzhiyun }
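
/*
 * The resulting retry backoff is exponential: after n consecutive failed
 * runs the delay is roughly min(VIRTIO_MEM_RETRY_TIMER_MIN_MS << n,
 * VIRTIO_MEM_RETRY_TIMER_MAX_MS), see virtio_mem_timer_expired() below;
 * any successful run resets it to the minimum.
 */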
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
1602*4882a593Smuzhiyun {
1603*4882a593Smuzhiyun 	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
1604*4882a593Smuzhiyun 					     retry_timer);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	virtio_mem_retry(vm);
1607*4882a593Smuzhiyun 	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
1608*4882a593Smuzhiyun 				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
1609*4882a593Smuzhiyun 	return HRTIMER_NORESTART;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun static void virtio_mem_handle_response(struct virtqueue *vq)
1613*4882a593Smuzhiyun {
1614*4882a593Smuzhiyun 	struct virtio_mem *vm = vq->vdev->priv;
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	wake_up(&vm->host_resp);
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun static int virtio_mem_init_vq(struct virtio_mem *vm)
1620*4882a593Smuzhiyun {
1621*4882a593Smuzhiyun 	struct virtqueue *vq;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
1624*4882a593Smuzhiyun 				   "guest-request");
1625*4882a593Smuzhiyun 	if (IS_ERR(vq))
1626*4882a593Smuzhiyun 		return PTR_ERR(vq);
1627*4882a593Smuzhiyun 	vm->vq = vq;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	return 0;
1630*4882a593Smuzhiyun }
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun static int virtio_mem_init(struct virtio_mem *vm)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
1635*4882a593Smuzhiyun 	uint16_t node_id;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	if (!vm->vdev->config->get) {
1638*4882a593Smuzhiyun 		dev_err(&vm->vdev->dev, "config access disabled\n");
1639*4882a593Smuzhiyun 		return -EINVAL;
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	/*
1643*4882a593Smuzhiyun 	 * We don't want to (un)plug or reuse any memory when in kdump. The
1644*4882a593Smuzhiyun 	 * memory is still accessible (but not mapped).
1645*4882a593Smuzhiyun 	 */
1646*4882a593Smuzhiyun 	if (is_kdump_kernel()) {
1647*4882a593Smuzhiyun 		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
1648*4882a593Smuzhiyun 		return -EBUSY;
1649*4882a593Smuzhiyun 	}
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* Fetch all properties that can't change. */
1652*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
1653*4882a593Smuzhiyun 			&vm->plugged_size);
1654*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
1655*4882a593Smuzhiyun 			&vm->device_block_size);
1656*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
1657*4882a593Smuzhiyun 			&node_id);
1658*4882a593Smuzhiyun 	vm->nid = virtio_mem_translate_node_id(vm, node_id);
1659*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
1660*4882a593Smuzhiyun 	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
1661*4882a593Smuzhiyun 			&vm->region_size);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	/*
1664*4882a593Smuzhiyun 	 * We always hotplug memory in memory block granularity. This way,
1665*4882a593Smuzhiyun 	 * we have to wait for exactly one memory block to online.
1666*4882a593Smuzhiyun 	 */
1667*4882a593Smuzhiyun 	if (vm->device_block_size > memory_block_size_bytes()) {
1668*4882a593Smuzhiyun 		dev_err(&vm->vdev->dev,
1669*4882a593Smuzhiyun 			"The block size is not supported (too big).\n");
1670*4882a593Smuzhiyun 		return -EINVAL;
1671*4882a593Smuzhiyun 	}
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	/* bad device setup - warn only */
1674*4882a593Smuzhiyun 	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
1675*4882a593Smuzhiyun 		dev_warn(&vm->vdev->dev,
1676*4882a593Smuzhiyun 			 "The alignment of the physical start address can make some memory unusable.\n");
1677*4882a593Smuzhiyun 	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
1678*4882a593Smuzhiyun 		dev_warn(&vm->vdev->dev,
1679*4882a593Smuzhiyun 			 "The alignment of the physical end address can make some memory unusable.\n");
1680*4882a593Smuzhiyun 	if (vm->addr + vm->region_size > phys_limit)
1681*4882a593Smuzhiyun 		dev_warn(&vm->vdev->dev,
1682*4882a593Smuzhiyun 			 "Some memory is not addressable. This can make some memory unusable.\n");
1683*4882a593Smuzhiyun 
1684*4882a593Smuzhiyun 	/*
1685*4882a593Smuzhiyun 	 * Calculate the subblock size:
1686*4882a593Smuzhiyun 	 * - At least MAX_ORDER - 1 / pageblock_order.
1687*4882a593Smuzhiyun 	 * - At least as big as a maximal buddy allocation (MAX_ORDER - 1) and a pageblock (pageblock_order).
1688*4882a593Smuzhiyun 	 * In the worst case, a single subblock per memory block.
1689*4882a593Smuzhiyun 	 */
1690*4882a593Smuzhiyun 	vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
1691*4882a593Smuzhiyun 						     pageblock_order);
1692*4882a593Smuzhiyun 	vm->subblock_size = max_t(uint64_t, vm->device_block_size,
1693*4882a593Smuzhiyun 				  vm->subblock_size);
1694*4882a593Smuzhiyun 	vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	/* Round up to the next full memory block */
1697*4882a593Smuzhiyun 	vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
1698*4882a593Smuzhiyun 						   memory_block_size_bytes());
1699*4882a593Smuzhiyun 	vm->next_mb_id = vm->first_mb_id;
1700*4882a593Smuzhiyun 	vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
1701*4882a593Smuzhiyun 			 vm->region_size) - 1;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
1704*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
1705*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
1706*4882a593Smuzhiyun 		 (unsigned long long)vm->device_block_size);
1707*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
1708*4882a593Smuzhiyun 		 memory_block_size_bytes());
1709*4882a593Smuzhiyun 	dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
1710*4882a593Smuzhiyun 		 (unsigned long long)vm->subblock_size);
1711*4882a593Smuzhiyun 	if (vm->nid != NUMA_NO_NODE)
1712*4882a593Smuzhiyun 		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	return 0;
1715*4882a593Smuzhiyun }
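
/*
 * Worked example (typical x86-64 configuration, not guaranteed): with 4 KiB
 * pages, MAX_ORDER = 11 and pageblock_order = 9, the subblock size becomes
 * 4 KiB << 10 = 4 MiB; a device block size of 2 MiB leaves that unchanged,
 * so 128 MiB memory blocks end up with nb_sb_per_mb = 32.
 */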
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun static int virtio_mem_create_resource(struct virtio_mem *vm)
1718*4882a593Smuzhiyun {
1719*4882a593Smuzhiyun 	/*
1720*4882a593Smuzhiyun 	 * When force-unloading the driver and removing the device, we
1721*4882a593Smuzhiyun 	 * could have a garbage pointer. Duplicate the string.
1722*4882a593Smuzhiyun 	 */
1723*4882a593Smuzhiyun 	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	if (!name)
1726*4882a593Smuzhiyun 		return -ENOMEM;
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
1729*4882a593Smuzhiyun 						   name, IORESOURCE_SYSTEM_RAM);
1730*4882a593Smuzhiyun 	if (!vm->parent_resource) {
1731*4882a593Smuzhiyun 		kfree(name);
1732*4882a593Smuzhiyun 		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
1733*4882a593Smuzhiyun 		dev_info(&vm->vdev->dev,
1734*4882a593Smuzhiyun 			 "reloading the driver is not supported\n");
1735*4882a593Smuzhiyun 		return -EBUSY;
1736*4882a593Smuzhiyun 	}
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	/* The memory is not actually busy - make add_memory() work. */
1739*4882a593Smuzhiyun 	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
1740*4882a593Smuzhiyun 	return 0;
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun static void virtio_mem_delete_resource(struct virtio_mem *vm)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun 	const char *name;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	if (!vm->parent_resource)
1748*4882a593Smuzhiyun 		return;
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	name = vm->parent_resource->name;
1751*4882a593Smuzhiyun 	release_resource(vm->parent_resource);
1752*4882a593Smuzhiyun 	kfree(vm->parent_resource);
1753*4882a593Smuzhiyun 	kfree(name);
1754*4882a593Smuzhiyun 	vm->parent_resource = NULL;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun static int virtio_mem_probe(struct virtio_device *vdev)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun 	struct virtio_mem *vm;
1760*4882a593Smuzhiyun 	int rc;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
1763*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
1766*4882a593Smuzhiyun 	if (!vm)
1767*4882a593Smuzhiyun 		return -ENOMEM;
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	init_waitqueue_head(&vm->host_resp);
1770*4882a593Smuzhiyun 	vm->vdev = vdev;
1771*4882a593Smuzhiyun 	INIT_WORK(&vm->wq, virtio_mem_run_wq);
1772*4882a593Smuzhiyun 	mutex_init(&vm->hotplug_mutex);
1773*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vm->next);
1774*4882a593Smuzhiyun 	spin_lock_init(&vm->removal_lock);
1775*4882a593Smuzhiyun 	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1776*4882a593Smuzhiyun 	vm->retry_timer.function = virtio_mem_timer_expired;
1777*4882a593Smuzhiyun 	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 	/* register the virtqueue */
1780*4882a593Smuzhiyun 	rc = virtio_mem_init_vq(vm);
1781*4882a593Smuzhiyun 	if (rc)
1782*4882a593Smuzhiyun 		goto out_free_vm;
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	/* initialize the device by querying the config */
1785*4882a593Smuzhiyun 	rc = virtio_mem_init(vm);
1786*4882a593Smuzhiyun 	if (rc)
1787*4882a593Smuzhiyun 		goto out_del_vq;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	/* create the parent resource for all memory */
1790*4882a593Smuzhiyun 	rc = virtio_mem_create_resource(vm);
1791*4882a593Smuzhiyun 	if (rc)
1792*4882a593Smuzhiyun 		goto out_del_vq;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	/*
1795*4882a593Smuzhiyun 	 * If we still have memory plugged, we have to unplug all memory first.
1796*4882a593Smuzhiyun 	 * Registering our parent resource makes sure that this memory isn't
1797*4882a593Smuzhiyun 	 * actually in use (e.g., trying to reload the driver).
1798*4882a593Smuzhiyun 	 */
1799*4882a593Smuzhiyun 	if (vm->plugged_size) {
1800*4882a593Smuzhiyun 		vm->unplug_all_required = 1;
1801*4882a593Smuzhiyun 		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
1802*4882a593Smuzhiyun 	}
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	/* register callbacks */
1805*4882a593Smuzhiyun 	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
1806*4882a593Smuzhiyun 	rc = register_memory_notifier(&vm->memory_notifier);
1807*4882a593Smuzhiyun 	if (rc)
1808*4882a593Smuzhiyun 		goto out_del_resource;
1809*4882a593Smuzhiyun 	rc = register_virtio_mem_device(vm);
1810*4882a593Smuzhiyun 	if (rc)
1811*4882a593Smuzhiyun 		goto out_unreg_mem;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	virtio_device_ready(vdev);
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	/* trigger a config update to start processing the requested_size */
1816*4882a593Smuzhiyun 	atomic_set(&vm->config_changed, 1);
1817*4882a593Smuzhiyun 	queue_work(system_freezable_wq, &vm->wq);
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	return 0;
1820*4882a593Smuzhiyun out_unreg_mem:
1821*4882a593Smuzhiyun 	unregister_memory_notifier(&vm->memory_notifier);
1822*4882a593Smuzhiyun out_del_resource:
1823*4882a593Smuzhiyun 	virtio_mem_delete_resource(vm);
1824*4882a593Smuzhiyun out_del_vq:
1825*4882a593Smuzhiyun 	vdev->config->del_vqs(vdev);
1826*4882a593Smuzhiyun out_free_vm:
1827*4882a593Smuzhiyun 	kfree(vm);
1828*4882a593Smuzhiyun 	vdev->priv = NULL;
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	return rc;
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun static void virtio_mem_remove(struct virtio_device *vdev)
1834*4882a593Smuzhiyun {
1835*4882a593Smuzhiyun 	struct virtio_mem *vm = vdev->priv;
1836*4882a593Smuzhiyun 	unsigned long mb_id;
1837*4882a593Smuzhiyun 	int rc;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	/*
1840*4882a593Smuzhiyun 	 * Make sure the workqueue won't be triggered anymore and no memory
1841*4882a593Smuzhiyun 	 * blocks can be onlined/offlined until we're finished here.
1842*4882a593Smuzhiyun 	 */
1843*4882a593Smuzhiyun 	mutex_lock(&vm->hotplug_mutex);
1844*4882a593Smuzhiyun 	spin_lock_irq(&vm->removal_lock);
1845*4882a593Smuzhiyun 	vm->removing = true;
1846*4882a593Smuzhiyun 	spin_unlock_irq(&vm->removal_lock);
1847*4882a593Smuzhiyun 	mutex_unlock(&vm->hotplug_mutex);
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	/* wait until the workqueue stopped */
1850*4882a593Smuzhiyun 	cancel_work_sync(&vm->wq);
1851*4882a593Smuzhiyun 	hrtimer_cancel(&vm->retry_timer);
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	/*
1854*4882a593Smuzhiyun 	 * After we unregistered our callbacks, user space can online partially
1855*4882a593Smuzhiyun 	 * plugged offline blocks. Make sure to remove them.
1856*4882a593Smuzhiyun 	 */
1857*4882a593Smuzhiyun 	virtio_mem_for_each_mb_state(vm, mb_id,
1858*4882a593Smuzhiyun 				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
1859*4882a593Smuzhiyun 		rc = virtio_mem_mb_remove(vm, mb_id);
1860*4882a593Smuzhiyun 		BUG_ON(rc);
1861*4882a593Smuzhiyun 		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
1862*4882a593Smuzhiyun 	}
1863*4882a593Smuzhiyun 	/*
1864*4882a593Smuzhiyun 	 * After we unregistered our callbacks, user space can no longer
1865*4882a593Smuzhiyun 	 * offline partially plugged online memory blocks. No need to worry
1866*4882a593Smuzhiyun 	 * about them.
1867*4882a593Smuzhiyun 	 */
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	/* unregister callbacks */
1870*4882a593Smuzhiyun 	unregister_virtio_mem_device(vm);
1871*4882a593Smuzhiyun 	unregister_memory_notifier(&vm->memory_notifier);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	/*
1874*4882a593Smuzhiyun 	 * There is no way we could reliably remove all memory we have added to
1875*4882a593Smuzhiyun 	 * the system. And there is no way to stop the driver/device from going
1876*4882a593Smuzhiyun 	 * away. Warn at least.
1877*4882a593Smuzhiyun 	 */
1878*4882a593Smuzhiyun 	if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
1879*4882a593Smuzhiyun 	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
1880*4882a593Smuzhiyun 	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
1881*4882a593Smuzhiyun 	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
1882*4882a593Smuzhiyun 		dev_warn(&vdev->dev, "device still has system memory added\n");
1883*4882a593Smuzhiyun 	} else {
1884*4882a593Smuzhiyun 		virtio_mem_delete_resource(vm);
1885*4882a593Smuzhiyun 		kfree_const(vm->resource_name);
1886*4882a593Smuzhiyun 	}
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	/* remove all tracking data - no locking needed */
1889*4882a593Smuzhiyun 	vfree(vm->mb_state);
1890*4882a593Smuzhiyun 	vfree(vm->sb_bitmap);
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 	/* reset the device and cleanup the queues */
1893*4882a593Smuzhiyun 	vdev->config->reset(vdev);
1894*4882a593Smuzhiyun 	vdev->config->del_vqs(vdev);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 	kfree(vm);
1897*4882a593Smuzhiyun 	vdev->priv = NULL;
1898*4882a593Smuzhiyun }
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun static void virtio_mem_config_changed(struct virtio_device *vdev)
1901*4882a593Smuzhiyun {
1902*4882a593Smuzhiyun 	struct virtio_mem *vm = vdev->priv;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	atomic_set(&vm->config_changed, 1);
1905*4882a593Smuzhiyun 	virtio_mem_retry(vm);
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1909*4882a593Smuzhiyun static int virtio_mem_freeze(struct virtio_device *vdev)
1910*4882a593Smuzhiyun {
1911*4882a593Smuzhiyun 	/*
1912*4882a593Smuzhiyun 	 * When restarting the VM, all memory is usually unplugged. Don't
1913*4882a593Smuzhiyun 	 * allow to suspend/hibernate.
1914*4882a593Smuzhiyun 	 * allow suspend/hibernate.
1915*4882a593Smuzhiyun 	dev_err(&vdev->dev, "save/restore not supported.\n");
1916*4882a593Smuzhiyun 	return -EPERM;
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun static int virtio_mem_restore(struct virtio_device *vdev)
1920*4882a593Smuzhiyun {
1921*4882a593Smuzhiyun 	return -EPERM;
1922*4882a593Smuzhiyun }
1923*4882a593Smuzhiyun #endif
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun static unsigned int virtio_mem_features[] = {
1926*4882a593Smuzhiyun #if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
1927*4882a593Smuzhiyun 	VIRTIO_MEM_F_ACPI_PXM,
1928*4882a593Smuzhiyun #endif
1929*4882a593Smuzhiyun };
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun static const struct virtio_device_id virtio_mem_id_table[] = {
1932*4882a593Smuzhiyun 	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
1933*4882a593Smuzhiyun 	{ 0 },
1934*4882a593Smuzhiyun };
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun static struct virtio_driver virtio_mem_driver = {
1937*4882a593Smuzhiyun 	.feature_table = virtio_mem_features,
1938*4882a593Smuzhiyun 	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
1939*4882a593Smuzhiyun 	.driver.name = KBUILD_MODNAME,
1940*4882a593Smuzhiyun 	.driver.owner = THIS_MODULE,
1941*4882a593Smuzhiyun 	.id_table = virtio_mem_id_table,
1942*4882a593Smuzhiyun 	.probe = virtio_mem_probe,
1943*4882a593Smuzhiyun 	.remove = virtio_mem_remove,
1944*4882a593Smuzhiyun 	.config_changed = virtio_mem_config_changed,
1945*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1946*4882a593Smuzhiyun 	.freeze	=	virtio_mem_freeze,
1947*4882a593Smuzhiyun 	.restore =	virtio_mem_restore,
1948*4882a593Smuzhiyun #endif
1949*4882a593Smuzhiyun };
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun module_virtio_driver(virtio_mem_driver);
1952*4882a593Smuzhiyun MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
1953*4882a593Smuzhiyun MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
1954*4882a593Smuzhiyun MODULE_DESCRIPTION("Virtio-mem driver");
1955*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1956