From f79d2b21614f11ea9f99dc5a3155d23439cec773 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Wed, 8 Jun 2022 09:51:14 +0200
Subject: mtd-utils: nanddump: fix writing big images on 32bit machines

When writing a full 4GiB NAND to a file, end_addr becomes 0x100000000.
With that, writing out the first page to the file doesn't happen because
size_left is calculated as 0x100000000 - 0 = 0x100000000, which is then
truncated to 32 bits and becomes zero.

Fix this by using an appropriate 64bit type for size_left.

Signed-off-by: Sascha Hauer
Signed-off-by: David Oberhollenzer
---
 nand-utils/nanddump.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nand-utils/nanddump.c b/nand-utils/nanddump.c
index d7fc320..47539f5 100644
--- a/nand-utils/nanddump.c
+++ b/nand-utils/nanddump.c
@@ -499,7 +499,7 @@ int main(int argc, char * const argv[])
 		}
 	} else {
 		/* Write requested length if oob is omitted */
-		size_t size_left = end_addr - ofs;
+		long long size_left = end_addr - ofs;
 		if (omitoob && (size_left < bs))
 			err = ofd_write(ofd, readbuf, size_left);
 		else
-- 
cgit v1.2.3
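
For reference, a minimal standalone sketch (not part of the patch) that reproduces the truncation described in the commit message. The variable names mirror nanddump.c, but the program itself is hypothetical: it simulates a 32-bit size_t with uint32_t so it can be run on any host.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values for a full dump of a 4GiB NAND device. */
	unsigned long long end_addr = 0x100000000ULL; /* end of the device */
	unsigned long long ofs = 0;                   /* dump starts at offset 0 */

	/* On a 32-bit machine size_t is 32 bits wide, so the difference
	 * 0x100000000 wraps around to 0 and the first write is skipped. */
	uint32_t truncated = (uint32_t)(end_addr - ofs);

	/* A 64-bit type keeps the full value, which is what the patch switches to. */
	long long size_left = end_addr - ofs;

	printf("32-bit size_left: %u\n", (unsigned)truncated); /* prints 0 */
	printf("64-bit size_left: %lld\n", size_left);         /* prints 4294967296 */
	return 0;
}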