Lines matching references to "c" in the UBIFS low-level I/O helpers (fs/ubifs/io.c):

76 void ubifs_ro_mode(struct ubifs_info *c, int err)  in ubifs_ro_mode()  argument
78 if (!c->ro_error) { in ubifs_ro_mode()
79 c->ro_error = 1; in ubifs_ro_mode()
80 c->no_chk_data_crc = 0; in ubifs_ro_mode()
81 c->vfs_sb->s_flags |= MS_RDONLY; in ubifs_ro_mode()
82 ubifs_warn(c, "switched to read-only mode, error %d", err); in ubifs_ro_mode()
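
The matched lines 76-82 cover nearly all of ubifs_ro_mode(), the common error funnel for the I/O helpers listed below: on the first fatal error it latches c->ro_error, re-enables data-node CRC checking, and forces the VFS superblock read-only. A minimal reconstruction; the elided lines (closing braces and the dump_stack() call after the warning) are assumed from the mainline source and may differ by kernel version:

        void ubifs_ro_mode(struct ubifs_info *c, int err)
        {
                if (!c->ro_error) {
                        c->ro_error = 1;
                        c->no_chk_data_crc = 0;          /* re-enable data CRC checking */
                        c->vfs_sb->s_flags |= MS_RDONLY;
                        ubifs_warn(c, "switched to read-only mode, error %d", err);
                        dump_stack();                    /* assumed; not among the matched lines */
                }
        }
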
93 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, in ubifs_leb_read() argument
98 err = ubi_read(c->ubi, lnum, buf, offs, len); in ubifs_leb_read()
104 ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d", in ubifs_leb_read()
111 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, in ubifs_leb_write() argument
116 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_write()
117 if (c->ro_error) in ubifs_leb_write()
119 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_write()
120 err = ubi_leb_write(c->ubi, lnum, buf, offs, len); in ubifs_leb_write()
123 err = dbg_leb_write(c, lnum, buf, offs, len); in ubifs_leb_write()
126 ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d", in ubifs_leb_write()
128 ubifs_ro_mode(c, err); in ubifs_leb_write()
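
The LEB wrappers that follow (ubifs_leb_write, ubifs_leb_change, ubifs_leb_unmap, ubifs_leb_map) share one shape: refuse the operation once c->ro_error is set, go through the debugging hooks when recovery testing is enabled, and on failure log the error and switch to read-only mode. A reconstruction of ubifs_leb_write() from the matched lines 111-128; the elided lines (the -EROFS early return, the error-path braces, dump_stack() and the final return) are assumed from mainline and may differ slightly by version:

        int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
                            int len)
        {
                int err;

                ubifs_assert(!c->ro_media && !c->ro_mount);
                if (c->ro_error)
                        return -EROFS;                          /* assumed elided line */
                if (!dbg_is_tst_rcvry(c))
                        err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
                else
                        err = dbg_leb_write(c, lnum, buf, offs, len);
                if (err) {
                        ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
                                  len, lnum, offs, err);
                        ubifs_ro_mode(c, err);
                        dump_stack();                           /* assumed from mainline */
                }
                return err;
        }
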
134 int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) in ubifs_leb_change() argument
138 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_change()
139 if (c->ro_error) in ubifs_leb_change()
141 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_change()
142 err = ubi_leb_change(c->ubi, lnum, buf, len); in ubifs_leb_change()
145 err = dbg_leb_change(c, lnum, buf, len); in ubifs_leb_change()
148 ubifs_err(c, "changing %d bytes in LEB %d failed, error %d", in ubifs_leb_change()
150 ubifs_ro_mode(c, err); in ubifs_leb_change()
156 int ubifs_leb_unmap(struct ubifs_info *c, int lnum) in ubifs_leb_unmap() argument
160 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_unmap()
161 if (c->ro_error) in ubifs_leb_unmap()
163 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_unmap()
164 err = ubi_leb_unmap(c->ubi, lnum); in ubifs_leb_unmap()
167 err = dbg_leb_unmap(c, lnum); in ubifs_leb_unmap()
170 ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err); in ubifs_leb_unmap()
171 ubifs_ro_mode(c, err); in ubifs_leb_unmap()
177 int ubifs_leb_map(struct ubifs_info *c, int lnum) in ubifs_leb_map() argument
181 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_leb_map()
182 if (c->ro_error) in ubifs_leb_map()
184 if (!dbg_is_tst_rcvry(c)) in ubifs_leb_map()
185 err = ubi_leb_map(c->ubi, lnum); in ubifs_leb_map()
188 err = dbg_leb_map(c, lnum); in ubifs_leb_map()
191 ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err); in ubifs_leb_map()
192 ubifs_ro_mode(c, err); in ubifs_leb_map()
198 int ubifs_is_mapped(const struct ubifs_info *c, int lnum) in ubifs_is_mapped() argument
202 err = ubi_is_mapped(c->ubi, lnum); in ubifs_is_mapped()
204 ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d", in ubifs_is_mapped()
239 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, in ubifs_check_node() argument
246 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_check_node()
247 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_check_node()
252 ubifs_err(c, "bad magic %#08x, expected %#08x", in ubifs_check_node()
261 ubifs_err(c, "bad node type %d", type); in ubifs_check_node()
266 if (node_len + offs > c->leb_size) in ubifs_check_node()
269 if (c->ranges[type].max_len == 0) { in ubifs_check_node()
270 if (node_len != c->ranges[type].len) in ubifs_check_node()
272 } else if (node_len < c->ranges[type].min_len || in ubifs_check_node()
273 node_len > c->ranges[type].max_len) in ubifs_check_node()
276 if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting && in ubifs_check_node()
277 !c->remounting_rw && c->no_chk_data_crc) in ubifs_check_node()
284 ubifs_err(c, "bad CRC: calculated %#08x, read %#08x", in ubifs_check_node()
294 ubifs_err(c, "bad node length %d", node_len); in ubifs_check_node()
297 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); in ubifs_check_node()
298 ubifs_dump_node(c, buf); in ubifs_check_node()
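
ubifs_check_node() validates the common header of every on-flash node: magic, node type, length against c->ranges[], and finally the CRC. The CRC covers the node from byte 8 onward, skipping the magic and crc fields of struct ubifs_ch. A minimal sketch of the check behind the message on line 284, assuming buf, node_len, quiet and c from the enclosing function; in the real code a mismatch jumps to the common bad-node path (lines 297-298):

        const struct ubifs_ch *ch = buf;
        uint32_t crc, node_crc;

        crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
        node_crc = le32_to_cpu(ch->crc);
        if (crc != node_crc) {
                if (!quiet)
                        ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
                                  crc, node_crc);
                return -EUCLEAN;        /* sketch; mainline reports the bad node first */
        }
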
320 void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) in ubifs_pad() argument
350 static unsigned long long next_sqnum(struct ubifs_info *c) in next_sqnum() argument
354 spin_lock(&c->cnt_lock); in next_sqnum()
355 sqnum = ++c->max_sqnum; in next_sqnum()
356 spin_unlock(&c->cnt_lock); in next_sqnum()
360 ubifs_err(c, "sequence number overflow %llu, end of life", in next_sqnum()
362 ubifs_ro_mode(c, -EINVAL); in next_sqnum()
364 ubifs_warn(c, "running out of sequence numbers, end of life soon"); in next_sqnum()
381 void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) in ubifs_prepare_node() argument
385 unsigned long long sqnum = next_sqnum(c); in ubifs_prepare_node()
399 pad = ALIGN(len, c->min_io_size) - len; in ubifs_prepare_node()
400 ubifs_pad(c, node + len, pad); in ubifs_prepare_node()
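
ubifs_prepare_node() stamps the common header of a node that is about to be written: it takes the next sequence number (line 385), fills struct ubifs_ch (the node type itself is set by the caller), computes the CRC over everything past the first 8 bytes, and, when pad is set, pads the tail out to c->min_io_size (lines 399-400). A sketch of the body; the header-field assignments are an assumption reconstructed from mainline:

        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);

        ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
        ch->len = cpu_to_le32(len);
        ch->group_type = UBIFS_NO_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
        ch->crc = cpu_to_le32(crc32(UBIFS_CRC32_INIT, node + 8, len - 8));

        if (pad) {
                len = ALIGN(len, 8);
                pad = ALIGN(len, c->min_io_size) - len;
                ubifs_pad(c, node + len, pad);
        }
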
414 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) in ubifs_prep_grp_node() argument
418 unsigned long long sqnum = next_sqnum(c); in ubifs_prep_grp_node()
447 wbuf->c->need_wbuf_sync = 1; in wbuf_timer_callback_nolock()
448 ubifs_wake_up_bgt(wbuf->c); in wbuf_timer_callback_nolock()
501 struct ubifs_info *c = wbuf->c; in ubifs_wbuf_sync_nolock() local
512 ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size); in ubifs_wbuf_sync_nolock()
513 ubifs_assert(wbuf->size >= c->min_io_size); in ubifs_wbuf_sync_nolock()
514 ubifs_assert(wbuf->size <= c->max_write_size); in ubifs_wbuf_sync_nolock()
515 ubifs_assert(wbuf->size % c->min_io_size == 0); in ubifs_wbuf_sync_nolock()
516 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_wbuf_sync_nolock()
517 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_sync_nolock()
518 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); in ubifs_wbuf_sync_nolock()
520 if (c->ro_error) in ubifs_wbuf_sync_nolock()
527 sync_len = ALIGN(wbuf->used, c->min_io_size); in ubifs_wbuf_sync_nolock()
530 ubifs_pad(c, wbuf->buf + wbuf->used, dirt); in ubifs_wbuf_sync_nolock()
531 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len); in ubifs_wbuf_sync_nolock()
547 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_sync_nolock()
548 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_sync_nolock()
549 else if (wbuf->offs & (c->max_write_size - 1)) in ubifs_wbuf_sync_nolock()
550 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; in ubifs_wbuf_sync_nolock()
552 wbuf->size = c->max_write_size; in ubifs_wbuf_sync_nolock()
559 err = wbuf->sync_callback(c, wbuf->lnum, in ubifs_wbuf_sync_nolock()
560 c->leb_size - wbuf->offs, dirt); in ubifs_wbuf_sync_nolock()
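
ubifs_wbuf_sync_nolock() flushes a journal-head write-buffer: the used part is padded up to c->min_io_size (lines 527-530), written out (line 531), the buffer offset advances, the buffer size is re-derived so subsequent writes stay aligned to c->max_write_size within the LEB (lines 547-552), and the journal head is told how much padding was wasted via the sync callback (lines 559-560). A condensed sketch of that flow; the locking and the reset of wbuf->avail/used are assumed from mainline:

        int err, dirt, sync_len;

        sync_len = ALIGN(wbuf->used, c->min_io_size);
        dirt = sync_len - wbuf->used;
        if (dirt)
                ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
        err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
        if (err)
                return err;

        spin_lock(&wbuf->lock);
        wbuf->offs += sync_len;
        if (c->leb_size - wbuf->offs < c->max_write_size)
                wbuf->size = c->leb_size - wbuf->offs;
        else if (wbuf->offs & (c->max_write_size - 1))
                wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
        else
                wbuf->size = c->max_write_size;
        wbuf->avail = wbuf->size;
        wbuf->used = 0;
        spin_unlock(&wbuf->lock);

        if (wbuf->sync_callback)
                err = wbuf->sync_callback(c, wbuf->lnum,
                                          c->leb_size - wbuf->offs, dirt);
        return err;
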
576 const struct ubifs_info *c = wbuf->c; in ubifs_wbuf_seek_nolock() local
579 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); in ubifs_wbuf_seek_nolock()
580 ubifs_assert(offs >= 0 && offs <= c->leb_size); in ubifs_wbuf_seek_nolock()
581 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); in ubifs_wbuf_seek_nolock()
588 if (c->leb_size - wbuf->offs < c->max_write_size) in ubifs_wbuf_seek_nolock()
589 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_seek_nolock()
590 else if (wbuf->offs & (c->max_write_size - 1)) in ubifs_wbuf_seek_nolock()
591 wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; in ubifs_wbuf_seek_nolock()
593 wbuf->size = c->max_write_size; in ubifs_wbuf_seek_nolock()
610 int ubifs_bg_wbufs_sync(struct ubifs_info *c) in ubifs_bg_wbufs_sync() argument
614 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_bg_wbufs_sync()
615 if (!c->need_wbuf_sync) in ubifs_bg_wbufs_sync()
617 c->need_wbuf_sync = 0; in ubifs_bg_wbufs_sync()
619 if (c->ro_error) { in ubifs_bg_wbufs_sync()
625 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_bg_wbufs_sync()
626 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_bg_wbufs_sync()
646 ubifs_err(c, "cannot sync write-buffer, error %d", err); in ubifs_bg_wbufs_sync()
647 ubifs_ro_mode(c, err); in ubifs_bg_wbufs_sync()
656 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_bg_wbufs_sync()
657 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_bg_wbufs_sync()
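
ubifs_bg_wbufs_sync() is the background thread's pass over all journal heads: it clears c->need_wbuf_sync, walks c->jheads[] (lines 625-626) and syncs any write-buffer whose timer has expired; a failure logs the error and switches to read-only mode (lines 646-647), after which the second loop (lines 656-657) cancels the remaining write-buffer timers. A sketch of the per-head step, assuming the mainline locking (mutex_lock_nested on wbuf->io_mutex, skipping heads that are busy or do not need syncing):

        for (i = 0; i < c->jhead_cnt; i++) {
                struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

                cond_resched();
                if (mutex_is_locked(&wbuf->io_mutex))
                        continue;       /* being written to right now - skip */

                mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
                if (!wbuf->need_sync) {
                        mutex_unlock(&wbuf->io_mutex);
                        continue;
                }

                err = ubifs_wbuf_sync_nolock(wbuf);
                mutex_unlock(&wbuf->io_mutex);
                if (err) {
                        ubifs_err(c, "cannot sync write-buffer, error %d", err);
                        ubifs_ro_mode(c, err);
                        goto out_timers;        /* label in the full function: cancel remaining timers */
                }
        }
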
684 struct ubifs_info *c = wbuf->c; in ubifs_wbuf_write_nolock() local
690 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); in ubifs_wbuf_write_nolock()
691 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); in ubifs_wbuf_write_nolock()
692 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); in ubifs_wbuf_write_nolock()
694 ubifs_assert(wbuf->size >= c->min_io_size); in ubifs_wbuf_write_nolock()
695 ubifs_assert(wbuf->size <= c->max_write_size); in ubifs_wbuf_write_nolock()
696 ubifs_assert(wbuf->size % c->min_io_size == 0); in ubifs_wbuf_write_nolock()
698 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_wbuf_write_nolock()
699 ubifs_assert(!c->space_fixup); in ubifs_wbuf_write_nolock()
700 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
701 ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); in ubifs_wbuf_write_nolock()
703 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { in ubifs_wbuf_write_nolock()
710 if (c->ro_error) in ubifs_wbuf_write_nolock()
723 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, in ubifs_wbuf_write_nolock()
730 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
731 wbuf->size = c->max_write_size; in ubifs_wbuf_write_nolock()
733 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_write_nolock()
759 err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, in ubifs_wbuf_write_nolock()
768 } else if (wbuf->offs & (c->max_write_size - 1)) { in ubifs_wbuf_write_nolock()
778 err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, in ubifs_wbuf_write_nolock()
795 n = aligned_len >> c->max_write_shift; in ubifs_wbuf_write_nolock()
797 n <<= c->max_write_shift; in ubifs_wbuf_write_nolock()
800 err = ubifs_leb_write(c, wbuf->lnum, buf + written, in ubifs_wbuf_write_nolock()
819 if (c->leb_size - wbuf->offs >= c->max_write_size) in ubifs_wbuf_write_nolock()
820 wbuf->size = c->max_write_size; in ubifs_wbuf_write_nolock()
822 wbuf->size = c->leb_size - wbuf->offs; in ubifs_wbuf_write_nolock()
830 int free = c->leb_size - wbuf->offs - wbuf->used; in ubifs_wbuf_write_nolock()
832 err = wbuf->sync_callback(c, wbuf->lnum, free, 0); in ubifs_wbuf_write_nolock()
843 ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d", in ubifs_wbuf_write_nolock()
845 ubifs_dump_node(c, buf); in ubifs_wbuf_write_nolock()
847 ubifs_dump_leb(c, wbuf->lnum); in ubifs_wbuf_write_nolock()
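
ubifs_wbuf_write_nolock() is the buffered-write fast path behind the journal heads. The matched lines only show its guard asserts and write calls; its decision structure, given here as a commented outline (an approximation of mainline, not verbatim), is:

        if (aligned_len <= wbuf->avail) {
                /* 1: the node fits into the write-buffer: copy it in and,
                 * only if the buffer became completely full, flush it
                 * (the ubifs_leb_write() on line 723). */
        } else if (wbuf->used) {
                /* 2: the buffer is partially filled: top it up with the start
                 * of the node and flush it (line 759), then continue with the
                 * remainder. */
        } else if (wbuf->offs & (c->max_write_size - 1)) {
                /* 3: buffer empty but offset not max_write_size-aligned:
                 * write wbuf->size bytes first so the following writes are
                 * aligned (line 778). */
        }
        /* 4: write whole c->max_write_size multiples straight from the
         * caller's buffer (n = aligned_len >> c->max_write_shift, lines
         * 795-800), then copy whatever is left into the now-empty buffer
         * and recompute wbuf->size (lines 819-822). */
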
865 int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, in ubifs_write_node() argument
868 int err, buf_len = ALIGN(len, c->min_io_size); in ubifs_write_node()
873 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_write_node()
874 ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); in ubifs_write_node()
875 ubifs_assert(!c->ro_media && !c->ro_mount); in ubifs_write_node()
876 ubifs_assert(!c->space_fixup); in ubifs_write_node()
878 if (c->ro_error) in ubifs_write_node()
881 ubifs_prepare_node(c, buf, len, 1); in ubifs_write_node()
882 err = ubifs_leb_write(c, lnum, buf, offs, buf_len); in ubifs_write_node()
884 ubifs_dump_node(c, buf); in ubifs_write_node()
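
ubifs_write_node() is the simple, unbuffered way to write a single node: it prepares the node with padding (line 881) and writes the min_io_size-aligned length in one go (lines 868, 882). A hypothetical caller sketch (my_node_len, my_lnum and my_offs are made-up names) showing the contract that the buffer must be large enough for the aligned length:

        int err, buf_len = ALIGN(my_node_len, c->min_io_size);
        void *node = kmalloc(buf_len, GFP_NOFS);

        if (!node)
                return -ENOMEM;
        /* fill in the node-type specific fields; struct ubifs_ch is filled
         * by ubifs_prepare_node() inside ubifs_write_node() */
        err = ubifs_write_node(c, node, my_node_len, my_lnum, my_offs);
        kfree(node);
        return err;
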
908 const struct ubifs_info *c = wbuf->c; in ubifs_read_node_wbuf() local
914 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_read_node_wbuf()
915 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_read_node_wbuf()
923 return ubifs_read_node(c, buf, type, len, lnum, offs); in ubifs_read_node_wbuf()
937 err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0); in ubifs_read_node_wbuf()
943 ubifs_err(c, "bad node type (%d but expected %d)", in ubifs_read_node_wbuf()
948 err = ubifs_check_node(c, buf, lnum, offs, 0, 0); in ubifs_read_node_wbuf()
950 ubifs_err(c, "expected node type %d", type); in ubifs_read_node_wbuf()
956 ubifs_err(c, "bad node length %d, expected %d", rlen, len); in ubifs_read_node_wbuf()
963 ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); in ubifs_read_node_wbuf()
964 ubifs_dump_node(c, buf); in ubifs_read_node_wbuf()
982 int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, in ubifs_read_node() argument
989 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); in ubifs_read_node()
990 ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); in ubifs_read_node()
991 ubifs_assert(!(offs & 7) && offs < c->leb_size); in ubifs_read_node()
994 err = ubifs_leb_read(c, lnum, buf, offs, len, 0); in ubifs_read_node()
999 ubifs_errc(c, "bad node type (%d but expected %d)", in ubifs_read_node()
1004 err = ubifs_check_node(c, buf, lnum, offs, 0, 0); in ubifs_read_node()
1006 ubifs_errc(c, "expected node type %d", type); in ubifs_read_node()
1012 ubifs_errc(c, "bad node length %d, expected %d", l, len); in ubifs_read_node()
1019 ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum, in ubifs_read_node()
1020 offs, ubi_is_mapped(c->ubi, lnum)); in ubifs_read_node()
1021 if (!c->probing) { in ubifs_read_node()
1022 ubifs_dump_node(c, buf); in ubifs_read_node()
1036 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) in ubifs_wbuf_init() argument
1040 wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); in ubifs_wbuf_init()
1044 size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); in ubifs_wbuf_init()
1060 size = c->max_write_size - (c->leb_start % c->max_write_size); in ubifs_wbuf_init()
1065 wbuf->c = c; in ubifs_wbuf_init()
1130 int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) in ubifs_sync_wbufs_by_inode() argument
1134 for (i = 0; i < c->jhead_cnt; i++) { in ubifs_sync_wbufs_by_inode()
1135 struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; in ubifs_sync_wbufs_by_inode()
1155 ubifs_ro_mode(c, err); in ubifs_sync_wbufs_by_inode()