// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-cache.c - pblk's write cache
 */

#include "pblk.h"

void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
			 unsigned long flags)
{
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

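	/* Start block layer I/O accounting for this bio; it is completed
	 * at the out: label below via bio_end_io_acct().
	 */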
	start_time = bio_start_io_acct(bio);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		bio_io_error(bio);
		goto out;
	}

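	/* The buffer entries are reserved; fill in the write context. A
	 * preflush bio is tagged as a flush point and the write thread is
	 * kicked so the buffer gets drained up to this entry instead of
	 * waiting for it to fill up.
	 */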
	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH) {
		w_ctx.flags |= PBLK_FLUSH_ENTRY;
		pblk_write_kick(pblk);
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

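	/* Copy the bio payload into the reserved ring buffer entries, one
	 * exposed page at a time, recording the target lba in each entry's
	 * write context.
	 */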
	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

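	/* Account the copied sectors as user writes for the write
	 * amplification counters and inform the rate limiter.
	 */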
	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	bio_end_io_acct(bio, start_time);
	pblk_write_should_kick(pblk);

	if (ret == NVM_IO_DONE)
		bio_endio(bio);
}

/*
 * On GC the incoming lbas are not necessarily sequential. Also, some of the
 * lbas might not be valid entries, which are marked as empty by the GC thread
 */
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct pblk_w_ctx w_ctx;
	unsigned int bpos, pos;
	void *data = gc_rq->data;
	int i, valid_entries;

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
		io_schedule();
		goto retry;
	}

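	/* Tag the entries as GC writes. Lbas marked ADDR_EMPTY by the GC
	 * thread are invalid and skipped, so only valid sectors consume the
	 * reserved buffer entries.
	 */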
	w_ctx.flags = PBLK_IOTYPE_GC;
	pblk_ppa_set_empty(&w_ctx.ppa);

	for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
		if (gc_rq->lba_list[i] == ADDR_EMPTY)
			continue;

		w_ctx.lba = gc_rq->lba_list[i];

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
		pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
						gc_rq->paddr_list[i], pos);

		data += PBLK_EXPOSED_PAGE_SIZE;
		valid_entries++;
	}

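	/* The number of valid sectors copied should match what the GC read
	 * path requested; a mismatch points to an inconsistent GC request.
	 */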
	WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
					"pblk: inconsistent GC write\n");

	atomic64_add(valid_entries, &pblk->gc_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_entries, &pblk->inflight_writes);
	atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif

	pblk_write_should_kick(pblk);
	return NVM_IO_OK;
}