xref: /OK3568_Linux_fs/kernel/fs/fscache/object.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /* FS-Cache object state machine handler
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5*4882a593Smuzhiyun  * Written by David Howells (dhowells@redhat.com)
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * See Documentation/filesystems/caching/object.rst for a description of the
8*4882a593Smuzhiyun  * object state machine and the in-kernel representations.
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #define FSCACHE_DEBUG_LEVEL COOKIE
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/prefetch.h>
15*4882a593Smuzhiyun #include "internal.h"
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
18*4882a593Smuzhiyun static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
19*4882a593Smuzhiyun static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
20*4882a593Smuzhiyun static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
21*4882a593Smuzhiyun static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
22*4882a593Smuzhiyun static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
23*4882a593Smuzhiyun static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
24*4882a593Smuzhiyun static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
25*4882a593Smuzhiyun static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
26*4882a593Smuzhiyun static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
27*4882a593Smuzhiyun static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
28*4882a593Smuzhiyun static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
29*4882a593Smuzhiyun static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
30*4882a593Smuzhiyun 
/* Build/reference the symbol name of a state object from its short name */
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
	const struct fscache_state __STATE_NAME(n) = {			\
		.name = #n,						\
		.short_name = sn,					\
		.work = f						\
	}

/*
 * Returns from work states.  The target state's ->work pointer is prefetched
 * as the dispatcher is likely to call it next.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

/* Sentinel return meaning "stay in the current state" (see WORK_STATE above) */
#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
	const struct fscache_state __STATE_NAME(n) = {			\
		.name = #n,						\
		.short_name = sn,					\
		.work = NULL,						\
		.transitions = { __VA_ARGS__, { 0, NULL } }		\
	}

/* A single transition-table entry: event bitmask -> destination state */
#define TRANSIT_TO(state, emask) \
	{ .events = (emask), .transit_to = STATE(state) }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * The object state machine.
75*4882a593Smuzhiyun  */
/* Lookup/creation path */
static WORK_STATE(INIT_OBJECT,		"INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY,		"PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT,		"ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT,	"LOOK", fscache_look_up_object);
/* Creation deliberately shares the lookup handler */
static WORK_STATE(CREATE_OBJECT,	"CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE,	"AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS,	"JUMP", fscache_jumpstart_dependents);

/* Normal-operation commands */
static WORK_STATE(INVALIDATE_OBJECT,	"INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT,	"UPDT", fscache_update_object);

/* Teardown path */
static WORK_STATE(LOOKUP_FAILURE,	"LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT,	"?PRN",
		  TRANSIT_TO(PARENT_READY,	1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD,		"?CMD",
		  TRANSIT_TO(INVALIDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_INVALIDATE),
		  TRANSIT_TO(UPDATE_OBJECT,	1 << FSCACHE_OBJECT_EV_UPDATE),
		  TRANSIT_TO(JUMPSTART_DEPS,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE,	"?CLR",
		  TRANSIT_TO(KILL_OBJECT,	1 << FSCACHE_OBJECT_EV_CLEARED));
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun  * Out-of-band event transition tables.  These are for handling unexpected
109*4882a593Smuzhiyun  * events, such as an I/O error.  If an OOB event occurs, the state machine
110*4882a593Smuzhiyun  * clears and disables the event and forces a transition to the nominated work
111*4882a593Smuzhiyun  * state (currently executing work states will complete first).
112*4882a593Smuzhiyun  *
113*4882a593Smuzhiyun  * In such a situation, object->state remembers the state the machine should
114*4882a593Smuzhiyun  * have been in/gone to and returning NO_TRANSIT returns to that.
115*4882a593Smuzhiyun  */
/* During initialisation, an error or kill aborts the whole set-up */
static const struct fscache_transition fscache_osm_init_oob[] = {
	   TRANSIT_TO(ABORT_INIT,
		      (1 << FSCACHE_OBJECT_EV_ERROR) |
		      (1 << FSCACHE_OBJECT_EV_KILL)),
	   { 0, NULL }			/* terminator */
};

/* During lookup/creation, an error or kill routes to the lookup-failure path */
static const struct fscache_transition fscache_osm_lookup_oob[] = {
	   TRANSIT_TO(LOOKUP_FAILURE,
		      (1 << FSCACHE_OBJECT_EV_ERROR) |
		      (1 << FSCACHE_OBJECT_EV_KILL)),
	   { 0, NULL }			/* terminator */
};

/* Once the object is available, an error or kill just kills the object */
static const struct fscache_transition fscache_osm_run_oob[] = {
	   TRANSIT_TO(KILL_OBJECT,
		      (1 << FSCACHE_OBJECT_EV_ERROR) |
		      (1 << FSCACHE_OBJECT_EV_KILL)),
	   { 0, NULL }			/* terminator */
};
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun static int  fscache_get_object(struct fscache_object *,
138*4882a593Smuzhiyun 			       enum fscache_obj_ref_trace);
139*4882a593Smuzhiyun static void fscache_put_object(struct fscache_object *,
140*4882a593Smuzhiyun 			       enum fscache_obj_ref_trace);
141*4882a593Smuzhiyun static bool fscache_enqueue_dependents(struct fscache_object *, int);
142*4882a593Smuzhiyun static void fscache_dequeue_object(struct fscache_object *);
143*4882a593Smuzhiyun static void fscache_update_aux_data(struct fscache_object *);
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun  * we need to notify the parent when an op completes that we had outstanding
147*4882a593Smuzhiyun  * upon it
148*4882a593Smuzhiyun  */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

	/* Nesting level 1: the caller may already hold the child's lock (see
	 * fscache_object_available()), and parent/child locks share a lock
	 * class, so annotate for lockdep. */
	spin_lock_nested(&parent->lock, 1);
	parent->n_obj_ops--;
	parent->n_ops--;
	/* Dropping the last outstanding op lets WAIT_FOR_CLEARANCE proceed */
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun  * Object state machine dispatcher.
166*4882a593Smuzhiyun  */
/* Run the state machine for one object: service out-of-band events first,
 * then either follow a wait state's transition table or execute work states
 * until one parks in a wait state or asks to be requeued (NO_TRANSIT). */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
	const struct fscache_transition *t;
	const struct fscache_state *state, *new_state;
	unsigned long events, event_mask;
	bool oob;
	int event = -1;

	ASSERT(object != NULL);

	_enter("{OBJ%x,%s,%lx}",
	       object->debug_id, object->state->name, object->events);

	event_mask = object->event_mask;
restart:
	object->event_mask = 0; /* Mask normal event handling */
	state = object->state;
restart_masked:
	events = object->events;

	/* Handle any out-of-band events (typically an error) */
	if (events & object->oob_event_mask) {
		_debug("{OBJ%x} oob %lx",
		       object->debug_id, events & object->oob_event_mask);
		oob = true;
		for (t = object->oob_table; t->events; t++) {
			if (events & t->events) {
				state = t->transit_to;
				ASSERT(state->work != NULL);
				/* Pick the highest-numbered matching event,
				 * then both disable it (oob_event_mask) and
				 * clear it so it only fires once. */
				event = fls(events & t->events) - 1;
				__clear_bit(event, &object->oob_event_mask);
				clear_bit(event, &object->events);
				goto execute_work_state;
			}
		}
	}
	oob = false;

	/* Wait states are just transition tables */
	if (!state->work) {
		if (events & event_mask) {
			for (t = state->transitions; t->events; t++) {
				if (events & t->events) {
					new_state = t->transit_to;
					event = fls(events & t->events) - 1;
					trace_fscache_osm(object, state,
							  true, false, event);
					clear_bit(event, &object->events);
					_debug("{OBJ%x} ev %d: %s -> %s",
					       object->debug_id, event,
					       state->name, new_state->name);
					object->state = state = new_state;
					goto execute_work_state;
				}
			}

			/* The event mask didn't include all the tabled bits */
			BUG();
		}
		/* Randomly woke up */
		goto unmask_events;
	}

execute_work_state:
	_debug("{OBJ%x} exec %s", object->debug_id, state->name);

	trace_fscache_osm(object, state, false, oob, event);
	new_state = state->work(object, event);
	event = -1;
	if (new_state == NO_TRANSIT) {
		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
		/* A dead object must not be requeued or have its event mask
		 * reopened; just bail out. */
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		/* Requeue to retry the same work state from the scheduler,
		 * accepting only OOB events in the meantime. */
		fscache_enqueue_object(object);
		event_mask = object->oob_event_mask;
		goto unmask_events;
	}

	_debug("{OBJ%x} %s -> %s",
	       object->debug_id, state->name, new_state->name);
	object->state = state = new_state;

	if (state->work) {
		if (unlikely(state == STATE(OBJECT_DEAD))) {
			_leave(" [dead]");
			return;
		}
		/* Chain straight into the next work state without re-reading
		 * the (still masked) normal event set. */
		goto restart_masked;
	}

	/* Transited to wait state: listen for OOB events plus whatever the
	 * wait state's transition table covers. */
	event_mask = object->oob_event_mask;
	for (t = state->transitions; t->events; t++)
		event_mask |= t->events;

unmask_events:
	object->event_mask = event_mask;
	/* Pair the mask update with the re-read of ->events so that an event
	 * raised concurrently is not lost between the two. */
	smp_mb();
	events = object->events;
	if (events & event_mask)
		goto restart;
	_leave(" [msk %lx]", event_mask);
}
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun /*
274*4882a593Smuzhiyun  * execute an object
275*4882a593Smuzhiyun  */
fscache_object_work_func(struct work_struct * work)276*4882a593Smuzhiyun static void fscache_object_work_func(struct work_struct *work)
277*4882a593Smuzhiyun {
278*4882a593Smuzhiyun 	struct fscache_object *object =
279*4882a593Smuzhiyun 		container_of(work, struct fscache_object, work);
280*4882a593Smuzhiyun 	unsigned long start;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	_enter("{OBJ%x}", object->debug_id);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	start = jiffies;
285*4882a593Smuzhiyun 	fscache_object_sm_dispatcher(object);
286*4882a593Smuzhiyun 	fscache_hist(fscache_objs_histogram, start);
287*4882a593Smuzhiyun 	fscache_put_object(object, fscache_obj_put_work);
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun /**
291*4882a593Smuzhiyun  * fscache_object_init - Initialise a cache object description
292*4882a593Smuzhiyun  * @object: Object description
293*4882a593Smuzhiyun  * @cookie: Cookie object will be attached to
294*4882a593Smuzhiyun  * @cache: Cache in which backing object will be found
295*4882a593Smuzhiyun  *
296*4882a593Smuzhiyun  * Initialise a cache object description to its basic values.
297*4882a593Smuzhiyun  *
298*4882a593Smuzhiyun  * See Documentation/filesystems/caching/backend-api.rst for a complete
299*4882a593Smuzhiyun  * description.
300*4882a593Smuzhiyun  */
fscache_object_init(struct fscache_object * object,struct fscache_cookie * cookie,struct fscache_cache * cache)301*4882a593Smuzhiyun void fscache_object_init(struct fscache_object *object,
302*4882a593Smuzhiyun 			 struct fscache_cookie *cookie,
303*4882a593Smuzhiyun 			 struct fscache_cache *cache)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun 	const struct fscache_transition *t;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	atomic_inc(&cache->object_count);
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	object->state = STATE(WAIT_FOR_INIT);
310*4882a593Smuzhiyun 	object->oob_table = fscache_osm_init_oob;
311*4882a593Smuzhiyun 	object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
312*4882a593Smuzhiyun 	spin_lock_init(&object->lock);
313*4882a593Smuzhiyun 	INIT_LIST_HEAD(&object->cache_link);
314*4882a593Smuzhiyun 	INIT_HLIST_NODE(&object->cookie_link);
315*4882a593Smuzhiyun 	INIT_WORK(&object->work, fscache_object_work_func);
316*4882a593Smuzhiyun 	INIT_LIST_HEAD(&object->dependents);
317*4882a593Smuzhiyun 	INIT_LIST_HEAD(&object->dep_link);
318*4882a593Smuzhiyun 	INIT_LIST_HEAD(&object->pending_ops);
319*4882a593Smuzhiyun 	object->n_children = 0;
320*4882a593Smuzhiyun 	object->n_ops = object->n_in_progress = object->n_exclusive = 0;
321*4882a593Smuzhiyun 	object->events = 0;
322*4882a593Smuzhiyun 	object->store_limit = 0;
323*4882a593Smuzhiyun 	object->store_limit_l = 0;
324*4882a593Smuzhiyun 	object->cache = cache;
325*4882a593Smuzhiyun 	object->cookie = cookie;
326*4882a593Smuzhiyun 	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
327*4882a593Smuzhiyun 	object->parent = NULL;
328*4882a593Smuzhiyun #ifdef CONFIG_FSCACHE_OBJECT_LIST
329*4882a593Smuzhiyun 	RB_CLEAR_NODE(&object->objlist_link);
330*4882a593Smuzhiyun #endif
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	object->oob_event_mask = 0;
333*4882a593Smuzhiyun 	for (t = object->oob_table; t->events; t++)
334*4882a593Smuzhiyun 		object->oob_event_mask |= t->events;
335*4882a593Smuzhiyun 	object->event_mask = object->oob_event_mask;
336*4882a593Smuzhiyun 	for (t = object->state->transitions; t->events; t++)
337*4882a593Smuzhiyun 		object->event_mask |= t->events;
338*4882a593Smuzhiyun }
339*4882a593Smuzhiyun EXPORT_SYMBOL(fscache_object_init);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun /*
342*4882a593Smuzhiyun  * Mark the object as no longer being live, making sure that we synchronise
343*4882a593Smuzhiyun  * against op submission.
344*4882a593Smuzhiyun  */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
	/* Clear the live flag under the object lock so that anyone testing
	 * liveness whilst holding the lock sees a stable answer. */
	spin_lock(&object->lock);
	clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
	spin_unlock(&object->lock);
}
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun /*
353*4882a593Smuzhiyun  * Abort object initialisation before we start it.
354*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
								int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* No further out-of-band events are of interest; detach from the
	 * parent's dependents list and go straight to the kill path. */
	object->oob_event_mask = 0;
	fscache_dequeue_object(object);
	return transit_to(KILL_OBJECT);
}
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun /*
366*4882a593Smuzhiyun  * initialise an object
367*4882a593Smuzhiyun  * - check the specified object's parent to see if we can make use of it
368*4882a593Smuzhiyun  *   immediately to do a creation
369*4882a593Smuzhiyun  * - we may need to start the process of creating a parent and we need to wait
370*4882a593Smuzhiyun  *   for the parent's lookup and creation to complete if it's not there yet
371*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
							     int event)
{
	struct fscache_object *parent;
	bool success;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We must not already be queued on anyone's dependents list */
	ASSERT(list_empty(&object->dep_link));

	parent = object->parent;
	if (!parent) {
		_leave(" [no parent]");
		return transit_to(DROP_OBJECT);
	}

	_debug("parent: %s of:%lx", parent->state->name, parent->flags);

	if (fscache_object_is_dying(parent)) {
		_leave(" [bad parent]");
		return transit_to(DROP_OBJECT);
	}

	/* If the parent already completed its lookup we can proceed at once */
	if (fscache_object_is_available(parent)) {
		_leave(" [ready]");
		return transit_to(PARENT_READY);
	}

	_debug("wait");

	/* Parent not ready yet: take a ref on this object and park it on the
	 * parent's dependents list.  Liveness must be rechecked under the
	 * parent's lock to avoid racing with the parent being killed. */
	spin_lock(&parent->lock);
	fscache_stat(&fscache_n_cop_grab_object);
	success = false;
	if (fscache_object_is_live(parent) &&
	    object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
		list_add(&object->dep_link, &parent->dependents);
		success = true;
	}
	fscache_stat_d(&fscache_n_cop_grab_object);
	spin_unlock(&parent->lock);
	if (!success) {
		_leave(" [grab failed]");
		return transit_to(DROP_OBJECT);
	}

	/* fscache_acquire_non_index_cookie() uses this
	 * to wake the chain up */
	fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
	_leave(" [wait]");
	return transit_to(WAIT_FOR_PARENT);
}
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun /*
425*4882a593Smuzhiyun  * Once the parent object is ready, we should kick off our lookup op.
426*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
							int event)
{
	struct fscache_object *parent = object->parent;

	_enter("{OBJ%x},%d", object->debug_id, event);

	ASSERT(parent != NULL);

	/* Account our forthcoming lookup as an operation on the parent so it
	 * stays pinned until fscache_done_parent_op() undoes this. */
	spin_lock(&parent->lock);
	parent->n_ops++;
	parent->n_obj_ops++;
	object->lookup_jif = jiffies;	/* start of lookup, for the histogram */
	spin_unlock(&parent->lock);

	_leave("");
	return transit_to(LOOK_UP_OBJECT);
}
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun /*
447*4882a593Smuzhiyun  * look an object up in the cache from which it was allocated
448*4882a593Smuzhiyun  * - we hold an "access lock" on the parent object, so the parent object cannot
449*4882a593Smuzhiyun  *   be withdrawn by either party till we've finished
450*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_object *parent = object->parent;
	int ret;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* From here on, errors/kill events divert to LOOKUP_FAILURE */
	object->oob_table = fscache_osm_lookup_oob;

	ASSERT(parent != NULL);
	ASSERTCMP(parent->n_ops, >, 0);
	ASSERTCMP(parent->n_obj_ops, >, 0);

	/* make sure the parent is still available */
	ASSERT(fscache_object_is_available(parent));

	/* Bail if the parent died, the cache hit an I/O error, or the cookie
	 * can no longer be used. */
	if (fscache_object_is_dying(parent) ||
	    test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
	    !fscache_use_cookie(object)) {
		_leave(" [unavailable]");
		return transit_to(LOOKUP_FAILURE);
	}

	_debug("LOOKUP \"%s\" in \"%s\"",
	       cookie->def->name, object->cache->tag->name);

	fscache_stat(&fscache_n_object_lookups);
	fscache_stat(&fscache_n_cop_lookup_object);
	ret = object->cache->ops->lookup_object(object);
	fscache_stat_d(&fscache_n_cop_lookup_object);

	/* Balances the fscache_use_cookie() above */
	fscache_unuse_cookie(object);

	if (ret == -ETIMEDOUT) {
		/* probably stuck behind another object, so move this one to
		 * the back of the queue */
		fscache_stat(&fscache_n_object_lookups_timed_out);
		_leave(" [timeout]");
		return NO_TRANSIT;
	}

	if (ret < 0) {
		_leave(" [error]");
		return transit_to(LOOKUP_FAILURE);
	}

	_leave(" [ok]");
	return transit_to(OBJECT_AVAILABLE);
}
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun /**
504*4882a593Smuzhiyun  * fscache_object_lookup_negative - Note negative cookie lookup
505*4882a593Smuzhiyun  * @object: Object pointing to cookie to mark
506*4882a593Smuzhiyun  *
507*4882a593Smuzhiyun  * Note negative lookup, permitting those waiting to read data from an already
508*4882a593Smuzhiyun  * existing backing object to continue as there's no data for them to read.
509*4882a593Smuzhiyun  */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* Only the first lookup-completion notice for this object counts */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests to begin
		 * returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		/* clear_bit_unlock() orders the flag updates above before the
		 * wakeup of anyone waiting on LOOKING_UP */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun /**
534*4882a593Smuzhiyun  * fscache_obtained_object - Note successful object lookup or creation
535*4882a593Smuzhiyun  * @object: Object pointing to cookie to mark
536*4882a593Smuzhiyun  *
537*4882a593Smuzhiyun  * Note successful lookup and/or creation, permitting those waiting to write
538*4882a593Smuzhiyun  * data to a backing object to continue.
539*4882a593Smuzhiyun  *
540*4882a593Smuzhiyun  * Note that after calling this, an object's cookie may be relinquished by the
541*4882a593Smuzhiyun  * netfs, and so must be accessed with object lock held.
542*4882a593Smuzhiyun  */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_positive);

		/* We do (presumably) have data */
		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		/* Allow write requests to begin stacking up and read requests
		 * to begin shovelling data.
		 */
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	} else {
		/* Lookup was already noted (negative); this is a creation */
		fscache_stat(&fscache_n_object_created);
	}

	/* Mark the object usable so dependents can proceed */
	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
	_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun /*
573*4882a593Smuzhiyun  * handle an object that has just become available
574*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
							    int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* From here on, errors/kill events divert to KILL_OBJECT */
	object->oob_table = fscache_osm_run_oob;

	spin_lock(&object->lock);

	/* Release our op on the parent; note this takes parent->lock nested
	 * inside object->lock (hence spin_lock_nested() there). */
	fscache_done_parent_op(object);
	if (object->n_in_progress == 0) {
		if (object->n_ops > 0) {
			ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
			/* Kick off operations that queued up during lookup */
			fscache_start_operations(object);
		} else {
			ASSERT(list_empty(&object->pending_ops));
		}
	}
	spin_unlock(&object->lock);

	/* Let the backend release any lookup-only resources */
	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
	fscache_stat(&fscache_n_object_avail);

	_leave("");
	return transit_to(JUMPSTART_DEPS);
}
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun /*
607*4882a593Smuzhiyun  * Wake up this object's dependent objects now that we've become available.
608*4882a593Smuzhiyun  */
fscache_jumpstart_dependents(struct fscache_object * object,int event)609*4882a593Smuzhiyun static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
610*4882a593Smuzhiyun 								int event)
611*4882a593Smuzhiyun {
612*4882a593Smuzhiyun 	_enter("{OBJ%x},%d", object->debug_id, event);
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
615*4882a593Smuzhiyun 		return NO_TRANSIT; /* Not finished; requeue */
616*4882a593Smuzhiyun 	return transit_to(WAIT_FOR_CMD);
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun /*
 * Handle lookup or creation failure.
621*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
							  int event)
{
	struct fscache_cookie *cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* No further out-of-band events will be handled for this object */
	object->oob_event_mask = 0;

	/* Give the backing cache a chance to tear down its lookup state */
	fscache_stat(&fscache_n_cop_lookup_complete);
	object->cache->ops->lookup_complete(object);
	fscache_stat_d(&fscache_n_cop_lookup_complete);

	set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

	/* Mark the cookie unavailable before waking lookup waiters so they
	 * observe the failure.
	 */
	cookie = object->cookie;
	set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Release the parent's wait on this child, then go die */
	fscache_done_parent_op(object);
	return transit_to(KILL_OBJECT);
}
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun /*
647*4882a593Smuzhiyun  * Wait for completion of all active operations on this object and the death of
648*4882a593Smuzhiyun  * all child objects of this object.
649*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
						       int event)
{
	_enter("{OBJ%x,%d,%d},%d",
	       object->debug_id, object->n_ops, object->n_children, event);

	fscache_mark_object_dead(object);
	object->oob_event_mask = 0;

	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
		/* Reject any new read/write ops and abort any that are pending. */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		fscache_cancel_all_ops(object);
	}

	/* Nothing left to wait for: the object can be dropped right away */
	if (list_empty(&object->dependents) &&
	    object->n_ops == 0 &&
	    object->n_children == 0)
		return transit_to(DROP_OBJECT);

	/* Start any still-queued operations so they can run to completion
	 * and release their counts.  The condition is rechecked under the
	 * lock since the unlocked read above may race with completion.
	 */
	if (object->n_in_progress == 0) {
		spin_lock(&object->lock);
		if (object->n_ops > 0 && object->n_in_progress == 0)
			fscache_start_operations(object);
		spin_unlock(&object->lock);
	}

	if (!list_empty(&object->dependents))
		return transit_to(KILL_DEPENDENTS);

	return transit_to(WAIT_FOR_CLEARANCE);
}
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun /*
684*4882a593Smuzhiyun  * Kill dependent objects.
685*4882a593Smuzhiyun  */
fscache_kill_dependents(struct fscache_object * object,int event)686*4882a593Smuzhiyun static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
687*4882a593Smuzhiyun 							   int event)
688*4882a593Smuzhiyun {
689*4882a593Smuzhiyun 	_enter("{OBJ%x},%d", object->debug_id, event);
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 	if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
692*4882a593Smuzhiyun 		return NO_TRANSIT; /* Not finished */
693*4882a593Smuzhiyun 	return transit_to(WAIT_FOR_CLEARANCE);
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun /*
697*4882a593Smuzhiyun  * Drop an object's attachments
698*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
						       int event)
{
	struct fscache_object *parent = object->parent;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_cache *cache = object->cache;
	bool awaken = false;

	_enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

	ASSERT(cookie != NULL);
	ASSERT(!hlist_unhashed(&object->cookie_link));

	/* Push any outstanding auxiliary-data change to disk before the
	 * object disappears.
	 */
	if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
		_debug("final update");
		fscache_update_aux_data(object);
	}

	/* Make sure the cookie no longer points here and that the netfs isn't
	 * waiting for us.
	 */
	spin_lock(&cookie->lock);
	hlist_del_init(&object->cookie_link);
	if (hlist_empty(&cookie->backing_objects) &&
	    test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		awaken = true;
	spin_unlock(&cookie->lock);

	/* Issue the wakeups outside the cookie lock */
	if (awaken)
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

	/* Prevent a race with our last child, which has to signal EV_CLEARED
	 * before dropping our spinlock.
	 */
	spin_lock(&object->lock);
	spin_unlock(&object->lock);

	/* Discard from the cache's collection of objects */
	spin_lock(&cache->object_list_lock);
	list_del_init(&object->cache_link);
	spin_unlock(&cache->object_list_lock);

	/* Let the backing cache release its own state for this object */
	fscache_stat(&fscache_n_cop_drop_object);
	cache->ops->drop_object(object);
	fscache_stat_d(&fscache_n_cop_drop_object);

	/* The parent object wants to know when all it dependents have gone */
	if (parent) {
		_debug("release parent OBJ%x {%d}",
		       parent->debug_id, parent->n_children);

		spin_lock(&parent->lock);
		parent->n_children--;
		if (parent->n_children == 0)
			fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
		spin_unlock(&parent->lock);
		object->parent = NULL;
	}

	/* this just shifts the object release to the work processor */
	fscache_put_object(object, fscache_obj_put_drop_obj);
	fscache_stat(&fscache_n_object_dead);

	_leave("");
	return transit_to(OBJECT_DEAD);
}
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun /*
770*4882a593Smuzhiyun  * get a ref on an object
771*4882a593Smuzhiyun  */
fscache_get_object(struct fscache_object * object,enum fscache_obj_ref_trace why)772*4882a593Smuzhiyun static int fscache_get_object(struct fscache_object *object,
773*4882a593Smuzhiyun 			      enum fscache_obj_ref_trace why)
774*4882a593Smuzhiyun {
775*4882a593Smuzhiyun 	int ret;
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	fscache_stat(&fscache_n_cop_grab_object);
778*4882a593Smuzhiyun 	ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
779*4882a593Smuzhiyun 	fscache_stat_d(&fscache_n_cop_grab_object);
780*4882a593Smuzhiyun 	return ret;
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun /*
784*4882a593Smuzhiyun  * Discard a ref on an object
785*4882a593Smuzhiyun  */
static void fscache_put_object(struct fscache_object *object,
			       enum fscache_obj_ref_trace why)
{
	/* Reference accounting is delegated to the backing cache; the put is
	 * bracketed by the in-flight cache-op statistics.
	 */
	fscache_stat(&fscache_n_cop_put_object);
	object->cache->ops->put_object(object, why);
	fscache_stat_d(&fscache_n_cop_put_object);
}
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun /**
795*4882a593Smuzhiyun  * fscache_object_destroy - Note that a cache object is about to be destroyed
796*4882a593Smuzhiyun  * @object: The object to be destroyed
797*4882a593Smuzhiyun  *
798*4882a593Smuzhiyun  * Note the imminent destruction and deallocation of a cache object record.
799*4882a593Smuzhiyun  */
void fscache_object_destroy(struct fscache_object *object)
{
	/* Take the object off the global tracking list */
	fscache_objlist_remove(object);

	/* We can get rid of the cookie now */
	fscache_cookie_put(object->cookie, fscache_cookie_put_object);
	object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun /*
811*4882a593Smuzhiyun  * enqueue an object for metadata-type processing
812*4882a593Smuzhiyun  */
void fscache_enqueue_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	/* The queued work item needs its own ref on the object; skip
	 * queueing entirely if the grab fails.
	 */
	if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
		/* get_cpu_var() disables preemption until the matching
		 * put_cpu_var() below.
		 */
		wait_queue_head_t *cong_wq =
			&get_cpu_var(fscache_object_cong_wait);

		if (queue_work(fscache_object_wq, &object->work)) {
			/* Wake anyone sleeping for wq congestion */
			if (fscache_object_congested())
				wake_up(cong_wq);
		} else
			/* Work was already queued: drop the ref we took */
			fscache_put_object(object, fscache_obj_put_queue);

		put_cpu_var(fscache_object_cong_wait);
	}
}
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun /**
832*4882a593Smuzhiyun  * fscache_object_sleep_till_congested - Sleep until object wq is congested
833*4882a593Smuzhiyun  * @timeoutp: Scheduler sleep timeout
834*4882a593Smuzhiyun  *
835*4882a593Smuzhiyun  * Allow an object handler to sleep until the object workqueue is congested.
836*4882a593Smuzhiyun  *
837*4882a593Smuzhiyun  * The caller must set up a wake up event before calling this and must have set
838*4882a593Smuzhiyun  * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
839*4882a593Smuzhiyun  * condition before calling this function as no test is made here.
840*4882a593Smuzhiyun  *
841*4882a593Smuzhiyun  * %true is returned if the object wq is congested, %false otherwise.
842*4882a593Smuzhiyun  */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
	wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
	DEFINE_WAIT(wait);

	/* Fast path: already congested, no need to sleep */
	if (fscache_object_congested())
		return true;

	/* Join the waitqueue first, then recheck, so a wakeup that lands
	 * between the test and the sleep cannot be lost.
	 */
	add_wait_queue_exclusive(cong_wq, &wait);
	if (!fscache_object_congested())
		*timeoutp = schedule_timeout(*timeoutp);
	finish_wait(cong_wq, &wait);

	return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun /*
861*4882a593Smuzhiyun  * Enqueue the dependents of an object for metadata-type processing.
862*4882a593Smuzhiyun  *
863*4882a593Smuzhiyun  * If we don't manage to finish the list before the scheduler wants to run
864*4882a593Smuzhiyun  * again then return false immediately.  We return true if the list was
865*4882a593Smuzhiyun  * cleared.
866*4882a593Smuzhiyun  */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
	struct fscache_object *dep;
	bool ret = true;

	_enter("{OBJ%x}", object->debug_id);

	/* Unlocked peek: the common fast path when there are no dependents */
	if (list_empty(&object->dependents))
		return true;

	spin_lock(&object->lock);

	while (!list_empty(&object->dependents)) {
		dep = list_entry(object->dependents.next,
				 struct fscache_object, dep_link);
		list_del_init(&dep->dep_link);

		/* Raise the event on the child, then drop a ref on it
		 * (presumably the one pinning it on the dependents list —
		 * see where dep_link entries are added).
		 */
		fscache_raise_event(dep, event);
		fscache_put_object(dep, fscache_obj_put_enq_dep);

		/* Yield if the scheduler wants the CPU; the caller requeues
		 * us to finish the remainder of the list.
		 */
		if (!list_empty(&object->dependents) && need_resched()) {
			ret = false;
			break;
		}
	}

	spin_unlock(&object->lock);
	return ret;
}
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun /*
898*4882a593Smuzhiyun  * remove an object from whatever queue it's waiting on
899*4882a593Smuzhiyun  */
fscache_dequeue_object(struct fscache_object * object)900*4882a593Smuzhiyun static void fscache_dequeue_object(struct fscache_object *object)
901*4882a593Smuzhiyun {
902*4882a593Smuzhiyun 	_enter("{OBJ%x}", object->debug_id);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	if (!list_empty(&object->dep_link)) {
905*4882a593Smuzhiyun 		spin_lock(&object->parent->lock);
906*4882a593Smuzhiyun 		list_del_init(&object->dep_link);
907*4882a593Smuzhiyun 		spin_unlock(&object->parent->lock);
908*4882a593Smuzhiyun 	}
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	_leave("");
911*4882a593Smuzhiyun }
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun /**
914*4882a593Smuzhiyun  * fscache_check_aux - Ask the netfs whether an object on disk is still valid
915*4882a593Smuzhiyun  * @object: The object to ask about
916*4882a593Smuzhiyun  * @data: The auxiliary data for the object
917*4882a593Smuzhiyun  * @datalen: The size of the auxiliary data
918*4882a593Smuzhiyun  *
919*4882a593Smuzhiyun  * This function consults the netfs about the coherency state of an object.
920*4882a593Smuzhiyun  * The caller must be holding a ref on cookie->n_active (held by
921*4882a593Smuzhiyun  * fscache_look_up_object() on behalf of the cache backend during object lookup
922*4882a593Smuzhiyun  * and creation).
923*4882a593Smuzhiyun  */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
					const void *data, uint16_t datalen,
					loff_t object_size)
{
	enum fscache_checkaux result;

	/* A netfs that supplies no comparator implicitly accepts whatever
	 * entry is on disk.
	 */
	if (!object->cookie->def->check_aux) {
		fscache_stat(&fscache_n_checkaux_none);
		return FSCACHE_CHECKAUX_OKAY;
	}

	result = object->cookie->def->check_aux(object->cookie->netfs_data,
						data, datalen, object_size);
	switch (result) {
	case FSCACHE_CHECKAUX_OKAY:
		/* The entry can be used as-is */
		fscache_stat(&fscache_n_checkaux_okay);
		break;

	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
		/* The entry is usable but its aux data wants rewriting */
		fscache_stat(&fscache_n_checkaux_update);
		break;

	case FSCACHE_CHECKAUX_OBSOLETE:
		/* The entry is stale and must be deleted */
		fscache_stat(&fscache_n_checkaux_obsolete);
		break;

	default:
		/* The netfs returned something outside the enum */
		BUG();
	}

	return result;
}
EXPORT_SYMBOL(fscache_check_aux);
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun /*
962*4882a593Smuzhiyun  * Asynchronously invalidate an object.
963*4882a593Smuzhiyun  */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
							      int event)
{
	struct fscache_operation *op;
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x},%d", object->debug_id, event);

	/* We're going to need the cookie.  If the cookie is not available then
	 * retire the object instead.
	 */
	if (!fscache_use_cookie(object)) {
		ASSERT(radix_tree_empty(&object->cookie->stores));
		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
		_leave(" [no cookie]");
		return transit_to(KILL_OBJECT);
	}

	/* Reject any new read/write ops and abort any that are pending. */
	fscache_invalidate_writes(cookie);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	fscache_cancel_all_ops(object);

	/* Now we have to wait for in-progress reads and writes: submit an
	 * exclusive async op that runs the backend's invalidation routine.
	 */
	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
			       NULL, NULL);
	/* FSCACHE_OP_UNUSE_COOKIE hands the cookie use we took above over to
	 * the op (note the explicit fscache_unuse_cookie() on error paths).
	 */
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto submit_op_failed;
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);

	/* Once we've completed the invalidation, we know there will be no data
	 * stored in the cache and thus we can reinstate the data-check-skip
	 * optimisation.
	 */
	set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* We can allow read and write requests to come in once again.  They'll
	 * queue up behind our exclusive invalidation operation.
	 */
	if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
	_leave(" [ok]");
	return transit_to(UPDATE_OBJECT);

nomem:
	/* Allocation failed: kill the object instead of invalidating it */
	fscache_mark_object_dead(object);
	fscache_unuse_cookie(object);
	_leave(" [ENOMEM]");
	return transit_to(KILL_OBJECT);

submit_op_failed:
	/* Submission failed (still holding the cookie lock at the jump) */
	fscache_mark_object_dead(object);
	spin_unlock(&cookie->lock);
	fscache_unuse_cookie(object);
	kfree(op);
	_leave(" [EIO]");
	return transit_to(KILL_OBJECT);
}
1033*4882a593Smuzhiyun 
fscache_invalidate_object(struct fscache_object * object,int event)1034*4882a593Smuzhiyun static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
1035*4882a593Smuzhiyun 							     int event)
1036*4882a593Smuzhiyun {
1037*4882a593Smuzhiyun 	const struct fscache_state *s;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	fscache_stat(&fscache_n_invalidates_run);
1040*4882a593Smuzhiyun 	fscache_stat(&fscache_n_cop_invalidate_object);
1041*4882a593Smuzhiyun 	s = _fscache_invalidate_object(object, event);
1042*4882a593Smuzhiyun 	fscache_stat_d(&fscache_n_cop_invalidate_object);
1043*4882a593Smuzhiyun 	return s;
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun /*
1047*4882a593Smuzhiyun  * Update auxiliary data.
1048*4882a593Smuzhiyun  */
static void fscache_update_aux_data(struct fscache_object *object)
{
	fscache_stat(&fscache_n_updates_run);
	/* Ask the backing cache to rewrite the object's stored state,
	 * bracketed by the in-flight cache-op statistics.
	 */
	fscache_stat(&fscache_n_cop_update_object);
	object->cache->ops->update_object(object);
	fscache_stat_d(&fscache_n_cop_update_object);
}
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun /*
1058*4882a593Smuzhiyun  * Asynchronously update an object.
1059*4882a593Smuzhiyun  */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
							 int event)
{
	_enter("{OBJ%x},%d", object->debug_id, event);

	/* The update runs synchronously within the state machine */
	fscache_update_aux_data(object);

	_leave("");
	return transit_to(WAIT_FOR_CMD);
}
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun /**
1072*4882a593Smuzhiyun  * fscache_object_retrying_stale - Note retrying stale object
1073*4882a593Smuzhiyun  * @object: The object that will be retried
1074*4882a593Smuzhiyun  *
1075*4882a593Smuzhiyun  * Note that an object lookup found an on-disk object that was adjudged to be
1076*4882a593Smuzhiyun  * stale and has been deleted.  The lookup will be retried.
1077*4882a593Smuzhiyun  */
void fscache_object_retrying_stale(struct fscache_object *object)
{
	/* NOTE(review): this bumps the no-space-reject counter even though
	 * the event being recorded is a stale-object retry;
	 * fscache_n_cache_stale_objects looks like the intended statistic —
	 * confirm against the counter definitions in stats.c.
	 */
	fscache_stat(&fscache_n_cache_no_space_reject);
}
EXPORT_SYMBOL(fscache_object_retrying_stale);
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun /**
1085*4882a593Smuzhiyun  * fscache_object_mark_killed - Note that an object was killed
1086*4882a593Smuzhiyun  * @object: The object that was culled
1087*4882a593Smuzhiyun  * @why: The reason the object was killed.
1088*4882a593Smuzhiyun  *
 * Note that an object was killed.  If the object has already been marked
 * killed by the cache, an error is logged; otherwise the reason is
 * recorded in the cache statistics.
1091*4882a593Smuzhiyun  */
void fscache_object_mark_killed(struct fscache_object *object,
				enum fscache_why_object_killed why)
{
	/* Only the first kill is accounted; a second kill from the backing
	 * cache indicates a bug there.
	 */
	if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
		pr_err("Error: Object already killed by cache [%s]\n",
		       object->cache->identifier);
		return;
	}

	/* Record the reason in the cache statistics */
	switch (why) {
	case FSCACHE_OBJECT_NO_SPACE:
		fscache_stat(&fscache_n_cache_no_space_reject);
		break;
	case FSCACHE_OBJECT_IS_STALE:
		fscache_stat(&fscache_n_cache_stale_objects);
		break;
	case FSCACHE_OBJECT_WAS_RETIRED:
		fscache_stat(&fscache_n_cache_retired_objects);
		break;
	case FSCACHE_OBJECT_WAS_CULLED:
		fscache_stat(&fscache_n_cache_culled_objects);
		break;
	}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun /*
1119*4882a593Smuzhiyun  * The object is dead.  We can get here if an object gets queued by an event
1120*4882a593Smuzhiyun  * that would lead to its death (such as EV_KILL) when the dispatcher is
1121*4882a593Smuzhiyun  * already running (and so can be requeued) but hasn't yet cleared the event
1122*4882a593Smuzhiyun  * mask.
1123*4882a593Smuzhiyun  */
fscache_object_dead(struct fscache_object * object,int event)1124*4882a593Smuzhiyun static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
1125*4882a593Smuzhiyun 						       int event)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
1128*4882a593Smuzhiyun 			      &object->flags))
1129*4882a593Smuzhiyun 		return NO_TRANSIT;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	WARN(true, "FS-Cache object redispatched after death");
1132*4882a593Smuzhiyun 	return NO_TRANSIT;
1133*4882a593Smuzhiyun }
1134