// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements most of the debugging stuff which is compiled in only
 * when it is enabled. But some debugging check functions are implemented in
 * corresponding subsystem, just because they are closely related and utilize
 * various local functions of those subsystems.
 */

#include <ctype.h>
#include <unistd.h>

#include "linux_err.h"
#include "bitops.h"
#include "kmem.h"
#include "ubifs.h"
#include "defs.h"
#include "debug.h"
#include "key.h"
#include "misc.h"

static DEFINE_SPINLOCK(dbg_lock);

static const char *get_key_fmt(int fmt)
{
	switch (fmt) {
	case UBIFS_SIMPLE_KEY_FMT:
		return "simple";
	default:
		return "unknown/invalid format";
	}
}

static const char *get_key_hash(int hash)
{
	switch (hash) {
	case UBIFS_KEY_HASH_R5:
		return "R5";
	case UBIFS_KEY_HASH_TEST:
		return "test";
	default:
		return "unknown/invalid name hash";
	}
}

static const char *get_key_type(int type)
{
	switch (type) {
	case UBIFS_INO_KEY:
		return "inode";
	case UBIFS_DENT_KEY:
		return "direntry";
	case UBIFS_XENT_KEY:
		return "xentry";
	case UBIFS_DATA_KEY:
		return "data";
	case UBIFS_TRUN_KEY:
		return "truncate";
	default:
		return "unknown/invalid key";
	}
}

const char *ubifs_get_type_name(int type)
{
	switch (type) {
	case UBIFS_ITYPE_REG:
		return "file";
	case UBIFS_ITYPE_DIR:
		return "dir";
	case UBIFS_ITYPE_LNK:
		return "symlink";
	case UBIFS_ITYPE_BLK:
		return "blkdev";
	case UBIFS_ITYPE_CHR:
		return "char dev";
	case UBIFS_ITYPE_FIFO:
		return "fifo";
	case UBIFS_ITYPE_SOCK:
		return "socket";
	default:
		return "unknown/invalid type";
	}
}

const char *dbg_snprintf_key(const struct ubifs_info *c,
			     const union ubifs_key *key, char *buffer, int len)
{
	char *p = buffer;
	int type = key_type(c, key);

	if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
		switch (type) {
		case UBIFS_INO_KEY:
			len -= snprintf(p, len, "(%lu, %s)",
					(unsigned long)key_inum(c, key),
					get_key_type(type));
			break;
		case UBIFS_DENT_KEY:
		case UBIFS_XENT_KEY:
			len -= snprintf(p, len, "(%lu, %s, %#08x)",
					(unsigned long)key_inum(c, key),
					get_key_type(type), key_hash(c, key));
			break;
		case UBIFS_DATA_KEY:
			len -= snprintf(p, len, "(%lu, %s, %u)",
					(unsigned long)key_inum(c, key),
					get_key_type(type), key_block(c, key));
			break;
		case UBIFS_TRUN_KEY:
			len -= snprintf(p, len, "(%lu, %s)",
					(unsigned long)key_inum(c, key),
					get_key_type(type));
			break;
		default:
			len -= snprintf(p, len, "(bad key type: %#08x, %#08x)",
					key->u32[0], key->u32[1]);
		}
	} else
		len -= snprintf(p, len, "bad key format %d", c->key_fmt);
	ubifs_assert(c, len > 0);
	return p;
}

const char *dbg_ntype(int type)
{
	switch (type) {
	case UBIFS_PAD_NODE:
		return "padding node";
	case UBIFS_SB_NODE:
		return "superblock node";
	case UBIFS_MST_NODE:
		return "master node";
	case UBIFS_REF_NODE:
		return "reference node";
	case UBIFS_INO_NODE:
		return "inode node";
	case UBIFS_DENT_NODE:
		return "direntry node";
	case UBIFS_XENT_NODE:
		return "xentry node";
	case UBIFS_DATA_NODE:
		return "data node";
	case UBIFS_TRUN_NODE:
		return "truncate node";
	case UBIFS_IDX_NODE:
		return "indexing node";
	case UBIFS_CS_NODE:
		return "commit start node";
	case UBIFS_ORPH_NODE:
		return "orphan node";
	case UBIFS_AUTH_NODE:
		return "auth node";
	default:
		return "unknown node";
	}
}

static const char *dbg_gtype(int type)
{
	switch (type) {
	case UBIFS_NO_NODE_GROUP:
		return "no node group";
	case UBIFS_IN_NODE_GROUP:
		return "in node group";
	case UBIFS_LAST_OF_NODE_GROUP:
		return "last of node group";
	default:
		return "unknown";
	}
}
const char *dbg_cstate(int cmt_state)
{
	switch (cmt_state) {
	case COMMIT_RESTING:
		return "commit resting";
	case COMMIT_BACKGROUND:
		return "background commit requested";
	case COMMIT_REQUIRED:
		return "commit required";
	case COMMIT_RUNNING_BACKGROUND:
		return "background commit running";
	case COMMIT_RUNNING_REQUIRED:
		return "commit running and required";
	case COMMIT_BROKEN:
		return "broken commit";
	default:
		return "unknown commit state";
	}
}

const char *dbg_jhead(int jhead)
{
	switch (jhead) {
	case GCHD:
		return "0 (GC)";
	case BASEHD:
		return "1 (base)";
	case DATAHD:
		return "2 (data)";
	default:
		return "unknown journal head";
	}
}

static void dump_ch(const struct ubifs_ch *ch)
{
	pr_err("\tmagic %#x\n", le32_to_cpu(ch->magic));
	pr_err("\tcrc %#x\n", le32_to_cpu(ch->crc));
	pr_err("\tnode_type %d (%s)\n", ch->node_type,
	       dbg_ntype(ch->node_type));
	pr_err("\tgroup_type %d (%s)\n", ch->group_type,
	       dbg_gtype(ch->group_type));
	pr_err("\tsqnum %llu\n", (unsigned long long)le64_to_cpu(ch->sqnum));
	pr_err("\tlen %u\n", le32_to_cpu(ch->len));
}

void ubifs_dump_node(const struct ubifs_info *c, const void *node,
		     int node_len)
{
	int i, n, type, safe_len, max_node_len, min_node_len;
	union ubifs_key key;
	const struct ubifs_ch *ch = node;
	char key_buf[DBG_KEY_BUF_LEN];

	/* If the magic is incorrect, just hexdump the first bytes */
	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
		pr_err("Not a node, first %zu bytes:", UBIFS_CH_SZ);
		print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)node, UBIFS_CH_SZ, 1);
		return;
	}

	/* Skip dumping unknown type node */
	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		pr_err("node type %d was not recognized\n", type);
		return;
	}

	spin_lock(&dbg_lock);
	dump_ch(node);

	if (c->ranges[type].max_len == 0) {
		max_node_len = min_node_len = c->ranges[type].len;
	} else {
		max_node_len = c->ranges[type].max_len;
		min_node_len = c->ranges[type].min_len;
	}
	safe_len = le32_to_cpu(ch->len);
	safe_len = safe_len > 0 ? safe_len : 0;
	safe_len = min3(safe_len, max_node_len, node_len);
	if (safe_len < min_node_len) {
		pr_err("node len(%d) is too short for %s, left %d bytes:\n",
		       safe_len, dbg_ntype(type), safe_len > UBIFS_CH_SZ ?
		       safe_len - (int)UBIFS_CH_SZ : 0);
		if (safe_len > UBIFS_CH_SZ)
			print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
				       (void *)node + UBIFS_CH_SZ,
				       safe_len - UBIFS_CH_SZ, 0);
		goto out_unlock;
	}
	if (safe_len != le32_to_cpu(ch->len))
		pr_err("\ttruncated node length %d\n", safe_len);

	switch (type) {
	case UBIFS_PAD_NODE:
	{
		const struct ubifs_pad_node *pad = node;

		pr_err("\tpad_len %u\n", le32_to_cpu(pad->pad_len));
		break;
	}
	case UBIFS_SB_NODE:
	{
		const struct ubifs_sb_node *sup = node;
		unsigned int sup_flags = le32_to_cpu(sup->flags);

		pr_err("\tkey_hash %d (%s)\n",
		       (int)sup->key_hash, get_key_hash(sup->key_hash));
		pr_err("\tkey_fmt %d (%s)\n",
		       (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
		pr_err("\tflags %#x\n", sup_flags);
		pr_err("\tbig_lpt %u\n", !!(sup_flags & UBIFS_FLG_BIGLPT));
		pr_err("\tspace_fixup %u\n",
		       !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
		pr_err("\tmin_io_size %u\n", le32_to_cpu(sup->min_io_size));
		pr_err("\tleb_size %u\n", le32_to_cpu(sup->leb_size));
		pr_err("\tleb_cnt %u\n", le32_to_cpu(sup->leb_cnt));
		pr_err("\tmax_leb_cnt %u\n", le32_to_cpu(sup->max_leb_cnt));
		pr_err("\tmax_bud_bytes %llu\n",
		       (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
		pr_err("\tlog_lebs %u\n", le32_to_cpu(sup->log_lebs));
		pr_err("\tlpt_lebs %u\n", le32_to_cpu(sup->lpt_lebs));
		pr_err("\torph_lebs %u\n", le32_to_cpu(sup->orph_lebs));
		pr_err("\tjhead_cnt %u\n", le32_to_cpu(sup->jhead_cnt));
		pr_err("\tfanout %u\n", le32_to_cpu(sup->fanout));
		pr_err("\tlsave_cnt %u\n", le32_to_cpu(sup->lsave_cnt));
		pr_err("\tdefault_compr %u\n",
		       (int)le16_to_cpu(sup->default_compr));
		pr_err("\trp_size %llu\n",
		       (unsigned long long)le64_to_cpu(sup->rp_size));
		pr_err("\trp_uid %u\n", le32_to_cpu(sup->rp_uid));
		pr_err("\trp_gid %u\n", le32_to_cpu(sup->rp_gid));
		pr_err("\tfmt_version %u\n", le32_to_cpu(sup->fmt_version));
		pr_err("\ttime_gran %u\n", le32_to_cpu(sup->time_gran));
		pr_err("\tUUID %pUB\n", sup->uuid);
		break;
	}
	case UBIFS_MST_NODE:
	{
		const struct ubifs_mst_node *mst = node;

		pr_err("\thighest_inum %llu\n",
		       (unsigned long long)le64_to_cpu(mst->highest_inum));
		pr_err("\tcommit number %llu\n",
		       (unsigned long long)le64_to_cpu(mst->cmt_no));
		pr_err("\tflags %#x\n", le32_to_cpu(mst->flags));
		pr_err("\tlog_lnum %u\n", le32_to_cpu(mst->log_lnum));
		pr_err("\troot_lnum %u\n", le32_to_cpu(mst->root_lnum));
		pr_err("\troot_offs %u\n", le32_to_cpu(mst->root_offs));
		pr_err("\troot_len %u\n", le32_to_cpu(mst->root_len));
		pr_err("\tgc_lnum %u\n", le32_to_cpu(mst->gc_lnum));
		pr_err("\tihead_lnum %u\n", le32_to_cpu(mst->ihead_lnum));
		pr_err("\tihead_offs %u\n", le32_to_cpu(mst->ihead_offs));
		pr_err("\tindex_size %llu\n",
		       (unsigned long long)le64_to_cpu(mst->index_size));
		pr_err("\tlpt_lnum %u\n", le32_to_cpu(mst->lpt_lnum));
		pr_err("\tlpt_offs %u\n", le32_to_cpu(mst->lpt_offs));
		pr_err("\tnhead_lnum %u\n", le32_to_cpu(mst->nhead_lnum));
		pr_err("\tnhead_offs %u\n", le32_to_cpu(mst->nhead_offs));
		pr_err("\tltab_lnum %u\n", le32_to_cpu(mst->ltab_lnum));
		pr_err("\tltab_offs %u\n", le32_to_cpu(mst->ltab_offs));
		pr_err("\tlsave_lnum %u\n", le32_to_cpu(mst->lsave_lnum));
		pr_err("\tlsave_offs %u\n", le32_to_cpu(mst->lsave_offs));
		pr_err("\tlscan_lnum %u\n", le32_to_cpu(mst->lscan_lnum));
		pr_err("\tleb_cnt %u\n", le32_to_cpu(mst->leb_cnt));
		pr_err("\tempty_lebs %u\n", le32_to_cpu(mst->empty_lebs));
		pr_err("\tidx_lebs %u\n", le32_to_cpu(mst->idx_lebs));
		pr_err("\ttotal_free %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_free));
		pr_err("\ttotal_dirty %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dirty));
		pr_err("\ttotal_used %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_used));
		pr_err("\ttotal_dead %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dead));
		pr_err("\ttotal_dark %llu\n",
		       (unsigned long long)le64_to_cpu(mst->total_dark));
		break;
	}
	case UBIFS_REF_NODE:
	{
		const struct ubifs_ref_node *ref = node;

		pr_err("\tlnum %u\n", le32_to_cpu(ref->lnum));
		pr_err("\toffs %u\n", le32_to_cpu(ref->offs));
		pr_err("\tjhead %u\n", le32_to_cpu(ref->jhead));
		break;
	}
	case UBIFS_INO_NODE:
	{
		const struct ubifs_ino_node *ino = node;

		key_read(c, &ino->key, &key);
		pr_err("\tkey %s\n",
		       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
		pr_err("\tcreat_sqnum %llu\n",
		       (unsigned long long)le64_to_cpu(ino->creat_sqnum));
		pr_err("\tsize %llu\n",
		       (unsigned long long)le64_to_cpu(ino->size));
		pr_err("\tnlink %u\n", le32_to_cpu(ino->nlink));
		pr_err("\tatime %lld.%u\n",
		       (long long)le64_to_cpu(ino->atime_sec),
		       le32_to_cpu(ino->atime_nsec));
		pr_err("\tmtime %lld.%u\n",
		       (long long)le64_to_cpu(ino->mtime_sec),
		       le32_to_cpu(ino->mtime_nsec));
		pr_err("\tctime %lld.%u\n",
		       (long long)le64_to_cpu(ino->ctime_sec),
		       le32_to_cpu(ino->ctime_nsec));
		pr_err("\tuid %u\n", le32_to_cpu(ino->uid));
		pr_err("\tgid %u\n", le32_to_cpu(ino->gid));
		pr_err("\tmode %u\n", le32_to_cpu(ino->mode));
		pr_err("\tflags %#x\n", le32_to_cpu(ino->flags));
		pr_err("\txattr_cnt %u\n", le32_to_cpu(ino->xattr_cnt));
		pr_err("\txattr_size %u\n", le32_to_cpu(ino->xattr_size));
		pr_err("\txattr_names %u\n", le32_to_cpu(ino->xattr_names));
		pr_err("\tcompr_type %#x\n", (int)le16_to_cpu(ino->compr_type));
		pr_err("\tdata len %u\n", le32_to_cpu(ino->data_len));
		break;
	}
	case UBIFS_DENT_NODE:
	case UBIFS_XENT_NODE:
	{
		const struct ubifs_dent_node *dent = node;
		int nlen = le16_to_cpu(dent->nlen);

		key_read(c, &dent->key, &key);
		pr_err("\tkey %s\n",
		       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
		pr_err("\tinum %llu\n",
		       (unsigned long long)le64_to_cpu(dent->inum));
		pr_err("\ttype %d\n", (int)dent->type);
		pr_err("\tnlen %d\n", nlen);
		pr_err("\tname ");

		if (nlen > UBIFS_MAX_NLEN ||
		    nlen > safe_len - UBIFS_DENT_NODE_SZ)
			pr_err("(bad name length, not printing, bad or corrupted node)");
		else {
			for (i = 0; i < nlen && dent->name[i]; i++)
				pr_cont("%c", isprint(dent->name[i]) ?
					dent->name[i] : '?');
		}
		pr_cont("\n");

		break;
	}
	case UBIFS_DATA_NODE:
	{
		const struct ubifs_data_node *dn = node;

		key_read(c, &dn->key, &key);
		pr_err("\tkey %s\n",
		       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
		pr_err("\tsize %u\n", le32_to_cpu(dn->size));
		pr_err("\tcompr_typ %d\n", (int)le16_to_cpu(dn->compr_type));
		pr_err("\tdata size %u\n",
		       le32_to_cpu(ch->len) - (unsigned int)UBIFS_DATA_NODE_SZ);
		pr_err("\tdata (length = %d):\n",
		       safe_len - (int)UBIFS_DATA_NODE_SZ);
		print_hex_dump("\t", DUMP_PREFIX_OFFSET, 32, 1,
			       (void *)&dn->data,
			       safe_len - (int)UBIFS_DATA_NODE_SZ, 0);
		break;
	}
	case UBIFS_TRUN_NODE:
	{
		const struct ubifs_trun_node *trun = node;

		pr_err("\tinum %u\n", le32_to_cpu(trun->inum));
		pr_err("\told_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->old_size));
		pr_err("\tnew_size %llu\n",
		       (unsigned long long)le64_to_cpu(trun->new_size));
		break;
	}
	case UBIFS_IDX_NODE:
	{
		const struct ubifs_idx_node *idx = node;
		int max_child_cnt = (safe_len - UBIFS_IDX_NODE_SZ) /
				    (ubifs_idx_node_sz(c, 1) -
				     UBIFS_IDX_NODE_SZ);

		n = min_t(int, le16_to_cpu(idx->child_cnt), max_child_cnt);
		pr_err("\tchild_cnt %d\n", (int)le16_to_cpu(idx->child_cnt));
		pr_err("\tlevel %d\n", (int)le16_to_cpu(idx->level));
		pr_err("\tBranches:\n");

		for (i = 0; i < n && i < c->fanout; i++) {
			const struct ubifs_branch *br;

			br = ubifs_idx_branch(c, idx, i);
			key_read(c, &br->key, &key);
			pr_err("\t%d: LEB %d:%d len %d key %s\n",
			       i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
			       le32_to_cpu(br->len),
			       dbg_snprintf_key(c, &key, key_buf,
						DBG_KEY_BUF_LEN));
		}
		break;
	}
	case UBIFS_CS_NODE:
		break;
	case UBIFS_ORPH_NODE:
	{
		const struct ubifs_orph_node *orph = node;

		pr_err("\tcommit number %llu\n",
		       (unsigned long long)le64_to_cpu(orph->cmt_no) &
		       LLONG_MAX);
		pr_err("\tlast node flag %llu\n",
		       (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
		n = (safe_len - UBIFS_ORPH_NODE_SZ) >> 3;
		pr_err("\t%d orphan inode numbers:\n", n);
		for (i = 0; i < n; i++)
			pr_err("\t ino %llu\n",
			       (unsigned long long)le64_to_cpu(orph->inos[i]));
		break;
	}
	case UBIFS_AUTH_NODE:
	{
		break;
	}
	default:
		pr_err("node type %d was not recognized\n", type);
	}
out_unlock:
	spin_unlock(&dbg_lock);
}

void ubifs_dump_lstats(const struct ubifs_lp_stats *lst)
{
	spin_lock(&dbg_lock);
	pr_err("(pid %d) Lprops statistics: empty_lebs %d, idx_lebs %d\n",
	       getpid(), lst->empty_lebs, lst->idx_lebs);
	pr_err("\ttaken_empty_lebs %d, total_free %lld, total_dirty %lld\n",
	       lst->taken_empty_lebs, lst->total_free, lst->total_dirty);
	pr_err("\ttotal_used %lld, total_dark %lld, total_dead %lld\n",
	       lst->total_used, lst->total_dark, lst->total_dead);
	spin_unlock(&dbg_lock);
}

void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
{
	int i;
	struct rb_node *rb;
	struct ubifs_bud *bud;
	struct ubifs_gced_idx_leb *idx_gc;
	long long available, outstanding, free;

	spin_lock(&c->space_lock);
	spin_lock(&dbg_lock);
	pr_err("(pid %d) Budgeting info: data budget sum %lld, total budget sum %lld\n",
	       getpid(), bi->data_growth + bi->dd_growth,
	       bi->data_growth + bi->dd_growth + bi->idx_growth);
	pr_err("\tbudg_data_growth %lld, budg_dd_growth %lld, budg_idx_growth %lld\n",
	       bi->data_growth, bi->dd_growth, bi->idx_growth);
	pr_err("\tmin_idx_lebs %d, old_idx_sz %llu, uncommitted_idx %lld\n",
	       bi->min_idx_lebs, bi->old_idx_sz, bi->uncommitted_idx);
	pr_err("\tpage_budget %d, inode_budget %d, dent_budget %d\n",
	       bi->page_budget, bi->inode_budget, bi->dent_budget);
	pr_err("\tnospace %u, nospace_rp %u\n", bi->nospace, bi->nospace_rp);
	pr_err("\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n", c->dark_wm,
	       c->dead_wm, c->max_idx_node_sz);

	if (bi != &c->bi)
		/*
		 * If we are dumping saved budgeting data, do not print
		 * additional information which is about the current state, not
		 * the old one which corresponded to the saved budgeting data.
		 */
		goto out_unlock;

	pr_err("\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
	       c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
	pr_err("\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, clean_zn_cnt %ld\n",
	       atomic_long_read(&c->dirty_pg_cnt),
	       atomic_long_read(&c->dirty_zn_cnt),
	       atomic_long_read(&c->clean_zn_cnt));
	pr_err("\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum);

	/* If we are in R/O mode, journal heads do not exist */
	if (c->jheads)
		for (i = 0; i < c->jhead_cnt; i++)
			pr_err("\tjhead %s\t LEB %d\n",
			       dbg_jhead(c->jheads[i].wbuf.jhead),
			       c->jheads[i].wbuf.lnum);
	for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
		bud = rb_entry(rb, struct ubifs_bud, rb);
		pr_err("\tbud LEB %d\n", bud->lnum);
	}
	list_for_each_entry(bud, &c->old_buds, list)
		pr_err("\told bud LEB %d\n", bud->lnum);
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		pr_err("\tGC'ed idx LEB %d unmap %d\n",
		       idx_gc->lnum, idx_gc->unmap);
	pr_err("\tcommit state %d\n", c->cmt_state);

	/* Print budgeting predictions */
	available = ubifs_calc_available(c, c->bi.min_idx_lebs);
	outstanding = c->bi.data_growth + c->bi.dd_growth;
	free = ubifs_get_free_space_nolock(c);
	pr_err("Budgeting predictions:\n");
	pr_err("\tavailable: %lld, outstanding %lld, free %lld\n",
	       available, outstanding, free);
out_unlock:
	spin_unlock(&dbg_lock);
	spin_unlock(&c->space_lock);
}

void ubifs_dump_lprop(const struct ubifs_info *c,
		      const struct ubifs_lprops *lp)
{
	int i, spc, dark = 0, dead = 0;
	struct rb_node *rb;
	struct ubifs_bud *bud;

	spc = lp->free + lp->dirty;
	if (spc < c->dead_wm)
		dead = spc;
	else
		dark = ubifs_calc_dark(c, spc);

	if (lp->flags & LPROPS_INDEX)
		pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d flags %#x (",
		       lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
		       lp->flags);
	else
		pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d flags %#-4x (",
		       lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
		       dark, dead, (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);

	if (lp->flags & LPROPS_TAKEN) {
		if (lp->flags & LPROPS_INDEX)
			pr_cont("index, taken");
		else
			pr_cont("taken");
	} else {
		const char *s;

		if (lp->flags & LPROPS_INDEX) {
			switch (lp->flags & LPROPS_CAT_MASK) {
			case LPROPS_DIRTY_IDX:
				s = "dirty index";
				break;
			case LPROPS_FRDI_IDX:
				s = "freeable index";
				break;
			default:
				s = "index";
			}
		} else {
			switch (lp->flags & LPROPS_CAT_MASK) {
			case LPROPS_UNCAT:
				s = "not categorized";
				break;
			case LPROPS_DIRTY:
				s = "dirty";
				break;
			case LPROPS_FREE:
				s = "free";
				break;
			case LPROPS_EMPTY:
				s = "empty";
				break;
			case LPROPS_FREEABLE:
				s = "freeable";
				break;
			default:
				s = NULL;
				break;
			}
		}
		pr_cont("%s", s);
	}

	for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
		bud = rb_entry(rb, struct ubifs_bud, rb);
		if (bud->lnum == lp->lnum) {
			int head = 0;

			for (i = 0; i < c->jhead_cnt; i++) {
				/*
				 * Note, if we are in R/O mode or in the middle
				 * of mounting/re-mounting, the write-buffers do
				 * not exist.
				 */
				if (c->jheads &&
				    lp->lnum == c->jheads[i].wbuf.lnum) {
					pr_cont(", jhead %s", dbg_jhead(i));
					head = 1;
				}
			}
			if (!head)
				pr_cont(", bud of jhead %s",
					dbg_jhead(bud->jhead));
		}
	}
	if (lp->lnum == c->gc_lnum)
		pr_cont(", GC LEB");
	pr_cont(")\n");
}

void ubifs_dump_lprops(struct ubifs_info *c)
{
	int lnum, err;
	struct ubifs_lprops lp;
	struct ubifs_lp_stats lst;

	pr_err("(pid %d) start dumping LEB properties\n", getpid());
	ubifs_get_lp_stats(c, &lst);
	ubifs_dump_lstats(&lst);

	for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err) {
			ubifs_err(c, "cannot read lprops for LEB %d", lnum);
			continue;
		}

		ubifs_dump_lprop(c, &lp);
	}
	pr_err("(pid %d) finish dumping LEB properties\n", getpid());
}

void ubifs_dump_lpt_info(struct ubifs_info *c)
{
	int i;

	spin_lock(&dbg_lock);
	pr_err("(pid %d) dumping LPT information\n", getpid());
	pr_err("\tlpt_sz: %lld\n", c->lpt_sz);
	pr_err("\tpnode_sz: %d\n", c->pnode_sz);
	pr_err("\tnnode_sz: %d\n", c->nnode_sz);
	pr_err("\tltab_sz: %d\n", c->ltab_sz);
	pr_err("\tlsave_sz: %d\n", c->lsave_sz);
	pr_err("\tbig_lpt: %u\n", c->big_lpt);
	pr_err("\tlpt_hght: %d\n", c->lpt_hght);
	pr_err("\tpnode_cnt: %d\n", c->pnode_cnt);
	pr_err("\tnnode_cnt: %d\n", c->nnode_cnt);
	pr_err("\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt);
	pr_err("\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt);
	pr_err("\tlsave_cnt: %d\n", c->lsave_cnt);
	pr_err("\tspace_bits: %d\n", c->space_bits);
	pr_err("\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
	pr_err("\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
	pr_err("\tlpt_spc_bits: %d\n", c->lpt_spc_bits);
	pr_err("\tpcnt_bits: %d\n", c->pcnt_bits);
	pr_err("\tlnum_bits: %d\n", c->lnum_bits);
	pr_err("\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
	pr_err("\tLPT head is at %d:%d\n", c->nhead_lnum, c->nhead_offs);
	pr_err("\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
	if (c->big_lpt)
		pr_err("\tLPT lsave is at %d:%d\n",
		       c->lsave_lnum, c->lsave_offs);
	for (i = 0; i < c->lpt_lebs; i++)
		pr_err("\tLPT LEB %d free %d dirty %d tgc %d cmt %d\n",
		       i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty,
		       c->ltab[i].tgc, c->ltab[i].cmt);
	spin_unlock(&dbg_lock);
}

void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	void *buf;

	pr_err("(pid %d) start dumping LEB %d\n", getpid(), lnum);

	buf = __vmalloc(c->leb_size, GFP_NOFS);
	if (!buf) {
		ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum);
		return;
	}

	sleb = ubifs_scan(c, lnum, 0, buf, 0);
	if (IS_ERR(sleb)) {
		ubifs_err(c, "scan error %d", (int)PTR_ERR(sleb));
		goto out;
	}

	pr_err("LEB %d has %d nodes ending at %d\n", lnum,
	       sleb->nodes_cnt, sleb->endpt);

	list_for_each_entry(snod, &sleb->nodes, list) {
		cond_resched();
		pr_err("Dumping node at LEB %d:%d len %d\n", lnum,
		       snod->offs, snod->len);
		ubifs_dump_node(c, snod->node, c->leb_size - snod->offs);
	}

	pr_err("(pid %d) finish dumping LEB %d\n", getpid(), lnum);
	ubifs_scan_destroy(sleb);

out:
	vfree(buf);
	return;
}

void ubifs_dump_znode(const struct ubifs_info *c,
		      const struct ubifs_znode *znode)
{
	int n;
	const struct ubifs_zbranch *zbr;
	char key_buf[DBG_KEY_BUF_LEN];

	spin_lock(&dbg_lock);
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;

	pr_err("znode %p, LEB %d:%d len %d parent %p iip %d level %d child_cnt %d flags %lx\n",
	       znode, zbr->lnum, zbr->offs, zbr->len, znode->parent,
	       znode->iip, znode->level, znode->child_cnt, znode->flags);

	if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
		spin_unlock(&dbg_lock);
		return;
	}

	pr_err("zbranches:\n");
	for (n = 0; n < znode->child_cnt; n++) {
		zbr = &znode->zbranch[n];
		if (znode->level > 0)
			pr_err("\t%d: znode %p LEB %d:%d len %d key %s\n",
			       n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
			       dbg_snprintf_key(c, &zbr->key, key_buf,
						DBG_KEY_BUF_LEN));
		else
			pr_err("\t%d: LNC %p LEB %d:%d len %d key %s\n",
			       n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
			       dbg_snprintf_key(c, &zbr->key, key_buf,
						DBG_KEY_BUF_LEN));
	}
	spin_unlock(&dbg_lock);
}

void ubifs_dump_heap(__unused struct ubifs_info *c,
		     struct ubifs_lpt_heap *heap, int cat)
{
	int i;

	pr_err("(pid %d) start dumping heap cat %d (%d elements)\n",
	       getpid(), cat, heap->cnt);
	for (i = 0; i < heap->cnt; i++) {
		struct ubifs_lprops *lprops = heap->arr[i];

		pr_err("\t%d. LEB %d hpos %d free %d dirty %d flags %d\n",
		       i, lprops->lnum, lprops->hpos, lprops->free,
		       lprops->dirty, lprops->flags);
	}
	pr_err("(pid %d) finish dumping heap\n", getpid());
}

void ubifs_dump_pnode(__unused struct ubifs_info *c, struct ubifs_pnode *pnode,
		      struct ubifs_nnode *parent, int iip)
{
	int i;

	pr_err("(pid %d) dumping pnode:\n", getpid());
	pr_err("\taddress %zx parent %zx cnext %zx\n",
	       (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
	pr_err("\tflags %lu iip %d level %d num %d\n",
	       pnode->flags, iip, pnode->level, pnode->num);
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_lprops *lp = &pnode->lprops[i];

		pr_err("\t%d: free %d dirty %d flags %d lnum %d\n",
		       i, lp->free, lp->dirty, lp->flags, lp->lnum);
	}
}

/**
 * dbg_walk_index - walk the on-flash index.
 * @c: UBIFS file-system description object
 * @leaf_cb: called for each leaf node
 * @znode_cb: called for each indexing node
 * @priv: private data which is passed to callbacks
 *
 * This function walks the UBIFS index and calls the @leaf_cb for each leaf
 * node and @znode_cb for each indexing node. Returns zero in case of success
 * and a negative error code in case of failure.
 *
 * It would be better if this function removed every znode it pulled into the
 * TNC, so that the behavior more closely matched the non-debugging behavior.
 */
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
		   dbg_znode_callback znode_cb, void *priv)
{
	int err;
	struct ubifs_zbranch *zbr;
	struct ubifs_znode *znode, *child;

	mutex_lock(&c->tnc_mutex);
	/* If the root indexing node is not in TNC - pull it */
	if (!c->zroot.znode) {
		c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
		if (IS_ERR(c->zroot.znode)) {
			err = PTR_ERR(c->zroot.znode);
			c->zroot.znode = NULL;
			goto out_unlock;
		}
	}

	/*
	 * We are going to traverse the indexing tree in the postorder manner.
	 * Go down and find the leftmost indexing node where we are going to
	 * start from.
	 */
	znode = c->zroot.znode;
	while (znode->level > 0) {
		zbr = &znode->zbranch[0];
		child = zbr->znode;
		if (!child) {
			child = ubifs_load_znode(c, zbr, znode, 0);
			if (IS_ERR(child)) {
				err = PTR_ERR(child);
				goto out_unlock;
			}
		}

		znode = child;
	}

	/* Iterate over all indexing nodes */
	while (1) {
		int idx;

		cond_resched();

		if (znode_cb) {
			err = znode_cb(c, znode, priv);
			if (err) {
				ubifs_err(c, "znode checking function returned error %d",
					  err);
				ubifs_dump_znode(c, znode);
				goto out_dump;
			}
		}
		if (leaf_cb && znode->level == 0) {
			for (idx = 0; idx < znode->child_cnt; idx++) {
				zbr = &znode->zbranch[idx];
				err = leaf_cb(c, zbr, priv);
				if (err) {
					ubifs_err(c, "leaf checking function returned error %d, for leaf at LEB %d:%d",
						  err, zbr->lnum, zbr->offs);
					goto out_dump;
				}
			}
		}

		if (!znode->parent)
			break;

		idx = znode->iip + 1;
		znode = znode->parent;
		if (idx < znode->child_cnt) {
			/* Switch to the next index in the parent */
			zbr = &znode->zbranch[idx];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, idx);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		} else
			/*
			 * This is the last child, switch to the parent and
			 * continue.
			 */
			continue;

		/* Go to the lowest leftmost znode in the new sub-tree */
		while (znode->level > 0) {
			zbr = &znode->zbranch[0];
			child = zbr->znode;
			if (!child) {
				child = ubifs_load_znode(c, zbr, znode, 0);
				if (IS_ERR(child)) {
					err = PTR_ERR(child);
					goto out_unlock;
				}
				zbr->znode = child;
			}
			znode = child;
		}
	}

	mutex_unlock(&c->tnc_mutex);
	return 0;

out_dump:
	if (znode->parent)
		zbr = &znode->parent->zbranch[znode->iip];
	else
		zbr = &c->zroot;
	ubifs_msg(c, "dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
	ubifs_dump_znode(c, znode);
out_unlock:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * add_size - add znode size to partially calculated index size.
 * @c: UBIFS file-system description object
 * @znode: znode to add size for
 * @priv: partially calculated index size
 *
 * This is a helper function for 'dbg_check_idx_size()' which is called for
 * every indexing node and adds its size to the 'long long' variable pointed to
 * by @priv.
 */
int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
{
	long long *idx_size = priv;
	int add;

	add = ubifs_idx_node_sz(c, znode->child_cnt);
	add = ALIGN(add, 8);
	*idx_size += add;
	return 0;
}

void ubifs_assert_failed(struct ubifs_info *c, const char *expr,
			 const char *file, int line)
{
	ubifs_err(c, "UBIFS assert failed: %s, in %s:%u", expr, file, line);

	/*
	 * Different from the Linux kernel.
	 * Invoke the callback function if there is one, otherwise make the
	 * file-system read-only when an assertion fails.
	 */
	if (c->assert_failed_cb)
		c->assert_failed_cb(c);
	else
		ubifs_ro_mode(c, -EINVAL);
}
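
/*
 * Illustrative sketch (not part of the original sources): a typical way to
 * drive dbg_walk_index() is to pass add_size() as the znode callback and let
 * it accumulate the aligned size of every indexing node, which is how
 * 'dbg_check_idx_size()' is described to use it above.  The function name
 * below is hypothetical and only demonstrates the callback wiring.
 */
static __unused long long dbg_walk_index_example(struct ubifs_info *c)
{
	long long idx_size = 0;
	int err;

	/* No leaf callback is needed: only indexing nodes contribute */
	err = dbg_walk_index(c, NULL, add_size, &idx_size);
	if (err)
		return err;

	return idx_size;
}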