// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/filesystems/caching/operations.rst
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @cookie: The cookie on which the operation is to be performed
 * @op: The operation to initialise
 * @processor: The function that will process the operation
 * @cancel: The function to call if the operation is cancelled (may be NULL)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object if needed.  An illustrative usage sketch follows the
 * function below.
 */
void fscache_operation_init(struct fscache_cookie *cookie,
			    struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
	trace_fscache_op(cookie, op, fscache_op_init);
}
EXPORT_SYMBOL(fscache_operation_init);
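
/*
 * Illustrative sketch: a minimal caller might allocate and initialise an
 * asynchronous operation roughly as below, in the spirit of callers such as
 * __fscache_attr_changed().  The processor my_op_processor() is a
 * hypothetical name used only for illustration.
 *
 *	struct fscache_operation *op;
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	fscache_operation_init(cookie, op, my_op_processor, NULL, NULL);
 *	op->flags = FSCACHE_OP_ASYNC;
 */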

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * For an async operation, this takes its own reference on the operation,
 * which is dropped when the queued work completes.  An illustrative sketch
 * of the FSCACHE_OP_MYTHREAD case follows the function below.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	struct fscache_cookie *cookie = op->object->cookie;

	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
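
/*
 * Illustrative sketch: for an FSCACHE_OP_MYTHREAD operation the work is done
 * by the submitting thread itself, which typically sets FSCACHE_OP_WAITING
 * before submission and then sleeps until fscache_run_op() clears the bit,
 * along these lines (the nobufs error label is hypothetical):
 *
 *	op->flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
 *	if (fscache_submit_op(object, op) < 0)
 *		goto nobufs;
 *	wait_on_bit(&op->flags, FSCACHE_OP_WAITING, TASK_UNINTERRUPTIBLE);
 */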

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	else
		trace_fscache_op(object->cookie, op, fscache_op_run);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 * - an illustrative caller sketch follows this function
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
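
/*
 * Illustrative sketch: an exclusive operation is flagged with
 * FSCACHE_OP_EXCLUSIVE before it is submitted, broadly as the attribute
 * change path does; the nobufs error label is hypothetical:
 *
 *	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
 *	spin_lock(&cookie->lock);
 *	if (hlist_empty(&cookie->backing_objects))
 *		goto nobufs;
 *	object = hlist_entry(cookie->backing_objects.first,
 *			     struct fscache_object, cookie_link);
 *	if (fscache_submit_exclusive_op(object, op) < 0)
 *		goto nobufs;
 *	spin_unlock(&cookie->lock);
 */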

/*
 * submit an operation for an object
 * - operations may be submitted only while the object is in one of the
 *   following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 * - an illustrative caller sketch follows this function
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(object->cookie, op, fscache_op_submit);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
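
/*
 * Illustrative sketch: a caller typically treats a negative return from
 * submission as "the cache can't take this" and falls back to working
 * without the cache, dropping its own reference on the rejected op:
 *
 *	if (fscache_submit_op(object, op) < 0) {
 *		fscache_put_operation(op);
 *		return -ENOBUFS;
 *	}
 */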

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object (or, optionally, one that
 * is already in progress) - an illustrative caller sketch follows this
 * function
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_cancel);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
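
/*
 * Illustrative sketch: a waiter that gives up on a still-pending operation
 * (on receipt of a signal, say) tries to cancel it; if cancellation fails
 * because the op has already been started, it falls back to waiting for the
 * op to be granted:
 *
 *	ret = fscache_cancel_op(op, false);
 *	if (ret == 0)
 *		return -ERESTARTSYS;
 *	wait_on_bit(&op->flags, FSCACHE_OP_WAITING, TASK_UNINTERRUPTIBLE);
 */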

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		trace_fscache_op(object->cookie, op, fscache_op_cancel_all);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.  An
 * illustrative processor sketch follows this function.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		trace_fscache_op(object->cookie, op, fscache_op_completed);
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		trace_fscache_op(object->cookie, op, fscache_op_cancelled);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
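
/*
 * Illustrative sketch: a processor routine marks its operation complete (or
 * cancelled on error) before returning, broadly as the attribute-change
 * processor does; do_the_work() is a hypothetical stand-in for the real work:
 *
 *	static void my_op_processor(struct fscache_operation *op)
 *	{
 *		int ret = do_the_work(op->object);
 *
 *		if (ret < 0)
 *			fscache_abort_object(op->object);
 *		fscache_op_complete(op, ret < 0);
 *	}
 */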

/*
 * release a reference on an operation
 * - when the last reference is dropped, the op is released and its count on
 *   the object is given up (deferred to the GC work item if the object lock
 *   cannot be taken immediately)
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		trace_fscache_op(object->cookie, op, fscache_op_gc);

		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(op->object->cookie, op, fscache_op_work);

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}