#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
+#include <linux/seq_file.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
+ mtd->read_cnt += 1;
+ mtd->read_sz += len;
res = part->master->read(part->master, from + part->offset,
len, retlen, buf);
if (unlikely(res)) {
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
+ mtd->other_cnt += 1;
return part->master->point (part->master, from + part->offset,
len, retlen, virt, phys);
}
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
part->master->unpoint(part->master, from + part->offset, len);
}
return -EINVAL;
if (ops->datbuf && from + ops->len > mtd->size)
return -EINVAL;
+ mtd->other_cnt += 1;
res = part->master->read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->read_user_prot_reg(part->master, from,
len, retlen, buf);
}
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->get_user_prot_info(part->master, buf, len);
}
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->read_fact_prot_reg(part->master, from,
len, retlen, buf);
}
size_t len)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->get_fact_prot_info(part->master, buf, len);
}
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
+ mtd->write_cnt += 1;
+ mtd->write_sz += len;
return part->master->write(part->master, to + part->offset,
len, retlen, buf);
}
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
+ mtd->write_cnt += 1;
+ mtd->write_sz += len;
return part->master->panic_write(part->master, to + part->offset,
len, retlen, buf);
}
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
+ mtd->other_cnt += 1;
return part->master->write_oob(part->master, to + part->offset, ops);
}
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->write_user_prot_reg(part->master, from,
len, retlen, buf);
}
size_t len)
{
struct mtd_part *part = PART(mtd);
+ mtd->other_cnt += 1;
return part->master->lock_user_prot_reg(part->master, from, len);
}
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
+ unsigned long i;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+ for (i = 0; i < count; i++) {
+ mtd->write_cnt += 1;
+ mtd->write_sz += vecs[i].iov_len;
+ }
return part->master->writev(part->master, vecs, count,
to + part->offset, retlen);
}
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
+ mtd->erase_cnt += 1;
+ mtd->erase_sz += instr->len;
ret = part->master->erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
+ mtd->other_cnt += 1;
return part->master->lock(part->master, ofs + part->offset, len);
}
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
+ mtd->other_cnt += 1;
return part->master->unlock(part->master, ofs + part->offset, len);
}
return -EROFS;
if (ofs >= mtd->size)
return -EINVAL;
+ mtd->other_cnt += 1;
ofs += part->offset;
res = part->master->block_markbad(part->master, ofs);
if (!res)
return ret;
}
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+
+/*
+ * mtd_diskstats - emit one /proc/diskstats-style line per MTD partition.
+ * @seqf: seq_file to print into.
+ *
+ * Walks the global mtd_partitions list and prints, for each partition:
+ * the MTD char-device major and minor, the partition name, then the
+ * standard diskstats columns (fields with no MTD equivalent -- merges,
+ * timings, in-flight I/O -- are printed as literal 0), followed by three
+ * MTD-specific columns: erase count, erase size, and the count of
+ * "other" operations (OOB, OTP, lock/unlock, markbad, ...).
+ *
+ * NOTE(review): the list is traversed without taking any lock; this
+ * assumes callers serialize against partition add/delete -- confirm.
+ * NOTE(review): read_sz/write_sz are reported in 512-byte sectors
+ * (>> 9) but erase_sz is printed unshifted (bytes?) -- verify this
+ * asymmetry is intentional for the consumer of this file.
+ */
+void mtd_diskstats(struct seq_file *seqf)
+{
+ struct mtd_part *part;
+
+ list_for_each_entry(part, &mtd_partitions, list) {
+ struct mtd_info *mtd = &part->mtd;
+
+ /* minor is index << 1: mtdN char nodes occupy even minors */
+ seq_printf(seqf, "%4d %7d %s %u %u %u "
+ "%u %u %u %u %u %u %u %u %u %u %u\n",
+ MTD_CHAR_MAJOR, mtd->index << 1,
+ mtd->name,
+ mtd->read_cnt,
+ 0, /* reads merged */
+ mtd->read_sz >> 9,
+ 0, /* read time */
+ mtd->write_cnt,
+ 0, /* writes merged */
+ mtd->write_sz >> 9,
+ 0, /* write time */
+ 0, /* I/Os in progress */
+ 0, /* I/O time */
+ 0, /* weighted I/O time */
+ mtd->erase_cnt,
+ mtd->erase_sz,
+ mtd->other_cnt
+ );
+ }
+}