/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/uaccess.h>

#include "internal.h"
  31. /*
  32. * cramfs super-block data in memory
  33. */
  34. struct cramfs_sb_info {
  35. unsigned long magic;
  36. unsigned long size;
  37. unsigned long blocks;
  38. unsigned long files;
  39. unsigned long flags;
  40. void *linear_virt_addr;
  41. resource_size_t linear_phys_addr;
  42. size_t mtd_point_size;
  43. };
  44. static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
  45. {
  46. return sb->s_fs_info;
  47. }
  48. static const struct super_operations cramfs_ops;
  49. static const struct inode_operations cramfs_dir_inode_operations;
  50. static const struct file_operations cramfs_directory_operations;
  51. static const struct file_operations cramfs_physmem_fops;
  52. static const struct address_space_operations cramfs_aops;
  53. static DEFINE_MUTEX(read_mutex);
  54. /* These macros may change in future, to provide better st_ino semantics. */
  55. #define OFFSET(x) ((x)->i_ino)
  56. static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
  57. {
  58. if (!cino->offset)
  59. return offset + 1;
  60. if (!cino->size)
  61. return offset + 1;
  62. /*
  63. * The file mode test fixes buggy mkcramfs implementations where
  64. * cramfs_inode->offset is set to a non zero value for entries
  65. * which did not contain data, like devices node and fifos.
  66. */
  67. switch (cino->mode & S_IFMT) {
  68. case S_IFREG:
  69. case S_IFDIR:
  70. case S_IFLNK:
  71. return cino->offset << 2;
  72. default:
  73. break;
  74. }
  75. return offset + 1;
  76. }
  77. static struct inode *get_cramfs_inode(struct super_block *sb,
  78. const struct cramfs_inode *cramfs_inode, unsigned int offset)
  79. {
  80. struct inode *inode;
  81. static struct timespec64 zerotime;
  82. inode = iget_locked(sb, cramino(cramfs_inode, offset));
  83. if (!inode)
  84. return ERR_PTR(-ENOMEM);
  85. if (!(inode->i_state & I_NEW))
  86. return inode;
  87. switch (cramfs_inode->mode & S_IFMT) {
  88. case S_IFREG:
  89. inode->i_fop = &generic_ro_fops;
  90. inode->i_data.a_ops = &cramfs_aops;
  91. if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
  92. CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
  93. CRAMFS_SB(sb)->linear_phys_addr)
  94. inode->i_fop = &cramfs_physmem_fops;
  95. break;
  96. case S_IFDIR:
  97. inode->i_op = &cramfs_dir_inode_operations;
  98. inode->i_fop = &cramfs_directory_operations;
  99. break;
  100. case S_IFLNK:
  101. inode->i_op = &page_symlink_inode_operations;
  102. inode_nohighmem(inode);
  103. inode->i_data.a_ops = &cramfs_aops;
  104. break;
  105. case S_IFCHR:
  106. case S_IFBLK:
  107. case S_IFIFO:
  108. case S_IFSOCK:
  109. init_special_inode(inode, cramfs_inode->mode,
  110. old_decode_dev(cramfs_inode->size));
  111. break;
  112. default:
  113. printk(KERN_DEBUG "CRAMFS: Invalid file type 0%04o for inode %lu.\n",
  114. inode->i_mode, inode->i_ino);
  115. iget_failed(inode);
  116. return ERR_PTR(-EIO);
  117. }
  118. inode->i_mode = cramfs_inode->mode;
  119. i_uid_write(inode, cramfs_inode->uid);
  120. i_gid_write(inode, cramfs_inode->gid);
  121. /* if the lower 2 bits are zero, the inode contains data */
  122. if (!(inode->i_ino & 3)) {
  123. inode->i_size = cramfs_inode->size;
  124. inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
  125. }
  126. /* Struct copy intentional */
  127. inode_set_mtime_to_ts(inode,
  128. inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
  129. /* inode->i_nlink is left 1 - arguably wrong for directories,
  130. but it's the best we can do without reading the directory
  131. contents. 1 yields the right result in GNU find, even
  132. without -noleaf option. */
  133. unlock_new_inode(inode);
  134. return inode;
  135. }
  136. /*
  137. * We have our own block cache: don't fill up the buffer cache
  138. * with the rom-image, because the way the filesystem is set
  139. * up the accesses should be fairly regular and cached in the
  140. * page cache and dentry tree anyway..
  141. *
  142. * This also acts as a way to guarantee contiguous areas of up to
  143. * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
  144. * worry about end-of-buffer issues even when decompressing a full
  145. * page cache.
  146. *
  147. * Note: This is all optimized away at compile time when
  148. * CONFIG_CRAMFS_BLOCKDEV=n.
  149. */
  150. #define READ_BUFFERS (2)
  151. /* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
  152. #define NEXT_BUFFER(_ix) ((_ix) ^ 1)
  153. /*
  154. * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
  155. * data that takes up more space than the original and with unlucky
  156. * alignment.
  157. */
  158. #define BLKS_PER_BUF_SHIFT (2)
  159. #define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
  160. #define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
  161. static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
  162. static unsigned buffer_blocknr[READ_BUFFERS];
  163. static struct super_block *buffer_dev[READ_BUFFERS];
  164. static int next_buffer;
  165. /*
  166. * Populate our block cache and return a pointer to it.
  167. */
  168. static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
  169. unsigned int len)
  170. {
  171. struct address_space *mapping = sb->s_bdev->bd_mapping;
  172. struct file_ra_state ra = {};
  173. struct page *pages[BLKS_PER_BUF];
  174. unsigned i, blocknr, buffer;
  175. unsigned long devsize;
  176. char *data;
  177. if (!len)
  178. return NULL;
  179. blocknr = offset >> PAGE_SHIFT;
  180. offset &= PAGE_SIZE - 1;
  181. /* Check if an existing buffer already has the data.. */
  182. for (i = 0; i < READ_BUFFERS; i++) {
  183. unsigned int blk_offset;
  184. if (buffer_dev[i] != sb)
  185. continue;
  186. if (blocknr < buffer_blocknr[i])
  187. continue;
  188. blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
  189. blk_offset += offset;
  190. if (blk_offset > BUFFER_SIZE ||
  191. blk_offset + len > BUFFER_SIZE)
  192. continue;
  193. return read_buffers[i] + blk_offset;
  194. }
  195. devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;
  196. /* Ok, read in BLKS_PER_BUF pages completely first. */
  197. file_ra_state_init(&ra, mapping);
  198. page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);
  199. for (i = 0; i < BLKS_PER_BUF; i++) {
  200. struct page *page = NULL;
  201. if (blocknr + i < devsize) {
  202. page = read_mapping_page(mapping, blocknr + i, NULL);
  203. /* synchronous error? */
  204. if (IS_ERR(page))
  205. page = NULL;
  206. }
  207. pages[i] = page;
  208. }
  209. buffer = next_buffer;
  210. next_buffer = NEXT_BUFFER(buffer);
  211. buffer_blocknr[buffer] = blocknr;
  212. buffer_dev[buffer] = sb;
  213. data = read_buffers[buffer];
  214. for (i = 0; i < BLKS_PER_BUF; i++) {
  215. struct page *page = pages[i];
  216. if (page) {
  217. memcpy_from_page(data, page, 0, PAGE_SIZE);
  218. put_page(page);
  219. } else
  220. memset(data, 0, PAGE_SIZE);
  221. data += PAGE_SIZE;
  222. }
  223. return read_buffers[buffer] + offset;
  224. }
  225. /*
  226. * Return a pointer to the linearly addressed cramfs image in memory.
  227. */
  228. static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
  229. unsigned int len)
  230. {
  231. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  232. if (!len)
  233. return NULL;
  234. if (len > sbi->size || offset > sbi->size - len)
  235. return page_address(ZERO_PAGE(0));
  236. return sbi->linear_virt_addr + offset;
  237. }
  238. /*
  239. * Returns a pointer to a buffer containing at least LEN bytes of
  240. * filesystem starting at byte offset OFFSET into the filesystem.
  241. */
  242. static void *cramfs_read(struct super_block *sb, unsigned int offset,
  243. unsigned int len)
  244. {
  245. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  246. if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
  247. return cramfs_direct_read(sb, offset, len);
  248. else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
  249. return cramfs_blkdev_read(sb, offset, len);
  250. else
  251. return NULL;
  252. }
  253. /*
  254. * For a mapping to be possible, we need a range of uncompressed and
  255. * contiguous blocks. Return the offset for the first block and number of
  256. * valid blocks for which that is true, or zero otherwise.
  257. */
  258. static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
  259. {
  260. struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
  261. int i;
  262. u32 *blockptrs, first_block_addr;
  263. /*
  264. * We can dereference memory directly here as this code may be
  265. * reached only when there is a direct filesystem image mapping
  266. * available in memory.
  267. */
  268. blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
  269. first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
  270. i = 0;
  271. do {
  272. u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
  273. u32 expect = (first_block_addr + block_off) |
  274. CRAMFS_BLK_FLAG_DIRECT_PTR |
  275. CRAMFS_BLK_FLAG_UNCOMPRESSED;
  276. if (blockptrs[i] != expect) {
  277. pr_debug("range: block %d/%d got %#x expects %#x\n",
  278. pgoff+i, pgoff + *pages - 1,
  279. blockptrs[i], expect);
  280. if (i == 0)
  281. return 0;
  282. break;
  283. }
  284. } while (++i < *pages);
  285. *pages = i;
  286. return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
  287. }
  288. #ifdef CONFIG_MMU
  289. /*
  290. * Return true if the last page of a file in the filesystem image contains
  291. * some other data that doesn't belong to that file. It is assumed that the
  292. * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
  293. * (verified by cramfs_get_block_range() and directly accessible in memory.
  294. */
  295. static bool cramfs_last_page_is_shared(struct inode *inode)
  296. {
  297. struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
  298. u32 partial, last_page, blockaddr, *blockptrs;
  299. char *tail_data;
  300. partial = offset_in_page(inode->i_size);
  301. if (!partial)
  302. return false;
  303. last_page = inode->i_size >> PAGE_SHIFT;
  304. blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
  305. blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
  306. blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
  307. tail_data = sbi->linear_virt_addr + blockaddr + partial;
  308. return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
  309. }
  310. static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
  311. {
  312. struct inode *inode = file_inode(file);
  313. struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
  314. unsigned int pages, max_pages, offset;
  315. unsigned long address, pgoff = vma->vm_pgoff;
  316. char *bailout_reason;
  317. int ret;
  318. ret = generic_file_readonly_mmap(file, vma);
  319. if (ret)
  320. return ret;
  321. /*
  322. * Now try to pre-populate ptes for this vma with a direct
  323. * mapping avoiding memory allocation when possible.
  324. */
  325. /* Could COW work here? */
  326. bailout_reason = "vma is writable";
  327. if (vma->vm_flags & VM_WRITE)
  328. goto bailout;
  329. max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  330. bailout_reason = "beyond file limit";
  331. if (pgoff >= max_pages)
  332. goto bailout;
  333. pages = min(vma_pages(vma), max_pages - pgoff);
  334. offset = cramfs_get_block_range(inode, pgoff, &pages);
  335. bailout_reason = "unsuitable block layout";
  336. if (!offset)
  337. goto bailout;
  338. address = sbi->linear_phys_addr + offset;
  339. bailout_reason = "data is not page aligned";
  340. if (!PAGE_ALIGNED(address))
  341. goto bailout;
  342. /* Don't map the last page if it contains some other data */
  343. if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
  344. pr_debug("mmap: %pD: last page is shared\n", file);
  345. pages--;
  346. }
  347. if (!pages) {
  348. bailout_reason = "no suitable block remaining";
  349. goto bailout;
  350. }
  351. if (pages == vma_pages(vma)) {
  352. /*
  353. * The entire vma is mappable. remap_pfn_range() will
  354. * make it distinguishable from a non-direct mapping
  355. * in /proc/<pid>/maps by substituting the file offset
  356. * with the actual physical address.
  357. */
  358. ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
  359. pages * PAGE_SIZE, vma->vm_page_prot);
  360. } else {
  361. /*
  362. * Let's create a mixed map if we can't map it all.
  363. * The normal paging machinery will take care of the
  364. * unpopulated ptes via cramfs_read_folio().
  365. */
  366. int i;
  367. vm_flags_set(vma, VM_MIXEDMAP);
  368. for (i = 0; i < pages && !ret; i++) {
  369. vm_fault_t vmf;
  370. unsigned long off = i * PAGE_SIZE;
  371. pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
  372. vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
  373. if (vmf & VM_FAULT_ERROR)
  374. ret = vm_fault_to_errno(vmf, 0);
  375. }
  376. }
  377. if (!ret)
  378. pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
  379. "to vma 0x%08lx, page_prot 0x%llx\n", file,
  380. pgoff, address, pages, vma_pages(vma), vma->vm_start,
  381. (unsigned long long)pgprot_val(vma->vm_page_prot));
  382. return ret;
  383. bailout:
  384. pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
  385. file, pgoff, bailout_reason);
  386. /* Didn't manage any direct map, but normal paging is still possible */
  387. return 0;
  388. }
  389. #else /* CONFIG_MMU */
  390. static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
  391. {
  392. return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
  393. }
  394. static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
  395. unsigned long addr, unsigned long len,
  396. unsigned long pgoff, unsigned long flags)
  397. {
  398. struct inode *inode = file_inode(file);
  399. struct super_block *sb = inode->i_sb;
  400. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  401. unsigned int pages, block_pages, max_pages, offset;
  402. pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  403. max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  404. if (pgoff >= max_pages || pages > max_pages - pgoff)
  405. return -EINVAL;
  406. block_pages = pages;
  407. offset = cramfs_get_block_range(inode, pgoff, &block_pages);
  408. if (!offset || block_pages != pages)
  409. return -ENOSYS;
  410. addr = sbi->linear_phys_addr + offset;
  411. pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
  412. file, pgoff*PAGE_SIZE, len, addr);
  413. return addr;
  414. }
  415. static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
  416. {
  417. return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
  418. NOMMU_MAP_READ | NOMMU_MAP_EXEC;
  419. }
  420. #endif /* CONFIG_MMU */
  421. static const struct file_operations cramfs_physmem_fops = {
  422. .llseek = generic_file_llseek,
  423. .read_iter = generic_file_read_iter,
  424. .splice_read = filemap_splice_read,
  425. .mmap = cramfs_physmem_mmap,
  426. #ifndef CONFIG_MMU
  427. .get_unmapped_area = cramfs_physmem_get_unmapped_area,
  428. .mmap_capabilities = cramfs_physmem_mmap_capabilities,
  429. #endif
  430. };
  431. static void cramfs_kill_sb(struct super_block *sb)
  432. {
  433. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  434. generic_shutdown_super(sb);
  435. if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
  436. if (sbi && sbi->mtd_point_size)
  437. mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
  438. put_mtd_device(sb->s_mtd);
  439. sb->s_mtd = NULL;
  440. } else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
  441. sync_blockdev(sb->s_bdev);
  442. bdev_fput(sb->s_bdev_file);
  443. }
  444. kfree(sbi);
  445. }
  446. static int cramfs_reconfigure(struct fs_context *fc)
  447. {
  448. sync_filesystem(fc->root->d_sb);
  449. fc->sb_flags |= SB_RDONLY;
  450. return 0;
  451. }
  452. static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
  453. struct cramfs_super *super)
  454. {
  455. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  456. unsigned long root_offset;
  457. bool silent = fc->sb_flags & SB_SILENT;
  458. /* We don't know the real size yet */
  459. sbi->size = PAGE_SIZE;
  460. /* Read the first block and get the superblock from it */
  461. mutex_lock(&read_mutex);
  462. memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
  463. mutex_unlock(&read_mutex);
  464. /* Do sanity checks on the superblock */
  465. if (super->magic != CRAMFS_MAGIC) {
  466. /* check for wrong endianness */
  467. if (super->magic == CRAMFS_MAGIC_WEND) {
  468. if (!silent)
  469. errorfc(fc, "wrong endianness");
  470. return -EINVAL;
  471. }
  472. /* check at 512 byte offset */
  473. mutex_lock(&read_mutex);
  474. memcpy(super,
  475. cramfs_read(sb, 512, sizeof(*super)),
  476. sizeof(*super));
  477. mutex_unlock(&read_mutex);
  478. if (super->magic != CRAMFS_MAGIC) {
  479. if (super->magic == CRAMFS_MAGIC_WEND && !silent)
  480. errorfc(fc, "wrong endianness");
  481. else if (!silent)
  482. errorfc(fc, "wrong magic");
  483. return -EINVAL;
  484. }
  485. }
  486. /* get feature flags first */
  487. if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
  488. errorfc(fc, "unsupported filesystem features");
  489. return -EINVAL;
  490. }
  491. /* Check that the root inode is in a sane state */
  492. if (!S_ISDIR(super->root.mode)) {
  493. errorfc(fc, "root is not a directory");
  494. return -EINVAL;
  495. }
  496. /* correct strange, hard-coded permissions of mkcramfs */
  497. super->root.mode |= 0555;
  498. root_offset = super->root.offset << 2;
  499. if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
  500. sbi->size = super->size;
  501. sbi->blocks = super->fsid.blocks;
  502. sbi->files = super->fsid.files;
  503. } else {
  504. sbi->size = 1<<28;
  505. sbi->blocks = 0;
  506. sbi->files = 0;
  507. }
  508. sbi->magic = super->magic;
  509. sbi->flags = super->flags;
  510. if (root_offset == 0)
  511. infofc(fc, "empty filesystem");
  512. else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
  513. ((root_offset != sizeof(struct cramfs_super)) &&
  514. (root_offset != 512 + sizeof(struct cramfs_super))))
  515. {
  516. errorfc(fc, "bad root offset %lu", root_offset);
  517. return -EINVAL;
  518. }
  519. return 0;
  520. }
  521. static int cramfs_finalize_super(struct super_block *sb,
  522. struct cramfs_inode *cramfs_root)
  523. {
  524. struct inode *root;
  525. /* Set it all up.. */
  526. sb->s_flags |= SB_RDONLY;
  527. sb->s_time_min = 0;
  528. sb->s_time_max = 0;
  529. sb->s_op = &cramfs_ops;
  530. root = get_cramfs_inode(sb, cramfs_root, 0);
  531. if (IS_ERR(root))
  532. return PTR_ERR(root);
  533. sb->s_root = d_make_root(root);
  534. if (!sb->s_root)
  535. return -ENOMEM;
  536. return 0;
  537. }
  538. static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
  539. {
  540. struct cramfs_sb_info *sbi;
  541. struct cramfs_super super;
  542. int i, err;
  543. sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
  544. if (!sbi)
  545. return -ENOMEM;
  546. sb->s_fs_info = sbi;
  547. /* Invalidate the read buffers on mount: think disk change.. */
  548. for (i = 0; i < READ_BUFFERS; i++)
  549. buffer_blocknr[i] = -1;
  550. err = cramfs_read_super(sb, fc, &super);
  551. if (err)
  552. return err;
  553. return cramfs_finalize_super(sb, &super.root);
  554. }
  555. static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
  556. {
  557. struct cramfs_sb_info *sbi;
  558. struct cramfs_super super;
  559. int err;
  560. sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
  561. if (!sbi)
  562. return -ENOMEM;
  563. sb->s_fs_info = sbi;
  564. /* Map only one page for now. Will remap it when fs size is known. */
  565. err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
  566. &sbi->linear_virt_addr, &sbi->linear_phys_addr);
  567. if (err || sbi->mtd_point_size != PAGE_SIZE) {
  568. pr_err("unable to get direct memory access to mtd:%s\n",
  569. sb->s_mtd->name);
  570. return err ? : -ENODATA;
  571. }
  572. pr_info("checking physical address %pap for linear cramfs image\n",
  573. &sbi->linear_phys_addr);
  574. err = cramfs_read_super(sb, fc, &super);
  575. if (err)
  576. return err;
  577. /* Remap the whole filesystem now */
  578. pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
  579. sb->s_mtd->name, sbi->size/1024);
  580. mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
  581. err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
  582. &sbi->linear_virt_addr, &sbi->linear_phys_addr);
  583. if (err || sbi->mtd_point_size != sbi->size) {
  584. pr_err("unable to get direct memory access to mtd:%s\n",
  585. sb->s_mtd->name);
  586. return err ? : -ENODATA;
  587. }
  588. return cramfs_finalize_super(sb, &super.root);
  589. }
  590. static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  591. {
  592. struct super_block *sb = dentry->d_sb;
  593. u64 id = 0;
  594. if (sb->s_bdev)
  595. id = huge_encode_dev(sb->s_bdev->bd_dev);
  596. else if (sb->s_dev)
  597. id = huge_encode_dev(sb->s_dev);
  598. buf->f_type = CRAMFS_MAGIC;
  599. buf->f_bsize = PAGE_SIZE;
  600. buf->f_blocks = CRAMFS_SB(sb)->blocks;
  601. buf->f_bfree = 0;
  602. buf->f_bavail = 0;
  603. buf->f_files = CRAMFS_SB(sb)->files;
  604. buf->f_ffree = 0;
  605. buf->f_fsid = u64_to_fsid(id);
  606. buf->f_namelen = CRAMFS_MAXPATHLEN;
  607. return 0;
  608. }
  609. /*
  610. * Read a cramfs directory entry.
  611. */
  612. static int cramfs_readdir(struct file *file, struct dir_context *ctx)
  613. {
  614. struct inode *inode = file_inode(file);
  615. struct super_block *sb = inode->i_sb;
  616. char *buf;
  617. unsigned int offset;
  618. /* Offset within the thing. */
  619. if (ctx->pos >= inode->i_size)
  620. return 0;
  621. offset = ctx->pos;
  622. /* Directory entries are always 4-byte aligned */
  623. if (offset & 3)
  624. return -EINVAL;
  625. buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
  626. if (!buf)
  627. return -ENOMEM;
  628. while (offset < inode->i_size) {
  629. struct cramfs_inode *de;
  630. unsigned long nextoffset;
  631. char *name;
  632. ino_t ino;
  633. umode_t mode;
  634. int namelen;
  635. mutex_lock(&read_mutex);
  636. de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
  637. name = (char *)(de+1);
  638. /*
  639. * Namelengths on disk are shifted by two
  640. * and the name padded out to 4-byte boundaries
  641. * with zeroes.
  642. */
  643. namelen = de->namelen << 2;
  644. memcpy(buf, name, namelen);
  645. ino = cramino(de, OFFSET(inode) + offset);
  646. mode = de->mode;
  647. mutex_unlock(&read_mutex);
  648. nextoffset = offset + sizeof(*de) + namelen;
  649. for (;;) {
  650. if (!namelen) {
  651. kfree(buf);
  652. return -EIO;
  653. }
  654. if (buf[namelen-1])
  655. break;
  656. namelen--;
  657. }
  658. if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
  659. break;
  660. ctx->pos = offset = nextoffset;
  661. }
  662. kfree(buf);
  663. return 0;
  664. }
  665. /*
  666. * Lookup and fill in the inode data..
  667. */
  668. static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
  669. {
  670. unsigned int offset = 0;
  671. struct inode *inode = NULL;
  672. int sorted;
  673. mutex_lock(&read_mutex);
  674. sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
  675. while (offset < dir->i_size) {
  676. struct cramfs_inode *de;
  677. char *name;
  678. int namelen, retval;
  679. int dir_off = OFFSET(dir) + offset;
  680. de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
  681. name = (char *)(de+1);
  682. /* Try to take advantage of sorted directories */
  683. if (sorted && (dentry->d_name.name[0] < name[0]))
  684. break;
  685. namelen = de->namelen << 2;
  686. offset += sizeof(*de) + namelen;
  687. /* Quick check that the name is roughly the right length */
  688. if (((dentry->d_name.len + 3) & ~3) != namelen)
  689. continue;
  690. for (;;) {
  691. if (!namelen) {
  692. inode = ERR_PTR(-EIO);
  693. goto out;
  694. }
  695. if (name[namelen-1])
  696. break;
  697. namelen--;
  698. }
  699. if (namelen != dentry->d_name.len)
  700. continue;
  701. retval = memcmp(dentry->d_name.name, name, namelen);
  702. if (retval > 0)
  703. continue;
  704. if (!retval) {
  705. inode = get_cramfs_inode(dir->i_sb, de, dir_off);
  706. break;
  707. }
  708. /* else (retval < 0) */
  709. if (sorted)
  710. break;
  711. }
  712. out:
  713. mutex_unlock(&read_mutex);
  714. return d_splice_alias(inode, dentry);
  715. }
  716. static int cramfs_read_folio(struct file *file, struct folio *folio)
  717. {
  718. struct inode *inode = folio->mapping->host;
  719. u32 maxblock;
  720. int bytes_filled;
  721. void *pgdata;
  722. bool success = false;
  723. maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  724. bytes_filled = 0;
  725. pgdata = kmap_local_folio(folio, 0);
  726. if (folio->index < maxblock) {
  727. struct super_block *sb = inode->i_sb;
  728. u32 blkptr_offset = OFFSET(inode) + folio->index * 4;
  729. u32 block_ptr, block_start, block_len;
  730. bool uncompressed, direct;
  731. mutex_lock(&read_mutex);
  732. block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
  733. uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
  734. direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
  735. block_ptr &= ~CRAMFS_BLK_FLAGS;
  736. if (direct) {
  737. /*
  738. * The block pointer is an absolute start pointer,
  739. * shifted by 2 bits. The size is included in the
  740. * first 2 bytes of the data block when compressed,
  741. * or PAGE_SIZE otherwise.
  742. */
  743. block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
  744. if (uncompressed) {
  745. block_len = PAGE_SIZE;
  746. /* if last block: cap to file length */
  747. if (folio->index == maxblock - 1)
  748. block_len =
  749. offset_in_page(inode->i_size);
  750. } else {
  751. block_len = *(u16 *)
  752. cramfs_read(sb, block_start, 2);
  753. block_start += 2;
  754. }
  755. } else {
  756. /*
  757. * The block pointer indicates one past the end of
  758. * the current block (start of next block). If this
  759. * is the first block then it starts where the block
  760. * pointer table ends, otherwise its start comes
  761. * from the previous block's pointer.
  762. */
  763. block_start = OFFSET(inode) + maxblock * 4;
  764. if (folio->index)
  765. block_start = *(u32 *)
  766. cramfs_read(sb, blkptr_offset - 4, 4);
  767. /* Beware... previous ptr might be a direct ptr */
  768. if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
  769. /* See comments on earlier code. */
  770. u32 prev_start = block_start;
  771. block_start = prev_start & ~CRAMFS_BLK_FLAGS;
  772. block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
  773. if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
  774. block_start += PAGE_SIZE;
  775. } else {
  776. block_len = *(u16 *)
  777. cramfs_read(sb, block_start, 2);
  778. block_start += 2 + block_len;
  779. }
  780. }
  781. block_start &= ~CRAMFS_BLK_FLAGS;
  782. block_len = block_ptr - block_start;
  783. }
  784. if (block_len == 0)
  785. ; /* hole */
  786. else if (unlikely(block_len > 2*PAGE_SIZE ||
  787. (uncompressed && block_len > PAGE_SIZE))) {
  788. mutex_unlock(&read_mutex);
  789. pr_err("bad data blocksize %u\n", block_len);
  790. goto err;
  791. } else if (uncompressed) {
  792. memcpy(pgdata,
  793. cramfs_read(sb, block_start, block_len),
  794. block_len);
  795. bytes_filled = block_len;
  796. } else {
  797. bytes_filled = cramfs_uncompress_block(pgdata,
  798. PAGE_SIZE,
  799. cramfs_read(sb, block_start, block_len),
  800. block_len);
  801. }
  802. mutex_unlock(&read_mutex);
  803. if (unlikely(bytes_filled < 0))
  804. goto err;
  805. }
  806. memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
  807. flush_dcache_folio(folio);
  808. success = true;
  809. err:
  810. kunmap_local(pgdata);
  811. folio_end_read(folio, success);
  812. return 0;
  813. }
  814. static const struct address_space_operations cramfs_aops = {
  815. .read_folio = cramfs_read_folio
  816. };
  817. /*
  818. * Our operations:
  819. */
  820. /*
  821. * A directory can only readdir
  822. */
  823. static const struct file_operations cramfs_directory_operations = {
  824. .llseek = generic_file_llseek,
  825. .read = generic_read_dir,
  826. .iterate_shared = cramfs_readdir,
  827. };
  828. static const struct inode_operations cramfs_dir_inode_operations = {
  829. .lookup = cramfs_lookup,
  830. };
  831. static const struct super_operations cramfs_ops = {
  832. .statfs = cramfs_statfs,
  833. };
  834. static int cramfs_get_tree(struct fs_context *fc)
  835. {
  836. int ret = -ENOPROTOOPT;
  837. if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
  838. ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
  839. if (!ret)
  840. return 0;
  841. }
  842. if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
  843. ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
  844. return ret;
  845. }
  846. static const struct fs_context_operations cramfs_context_ops = {
  847. .get_tree = cramfs_get_tree,
  848. .reconfigure = cramfs_reconfigure,
  849. };
  850. /*
  851. * Set up the filesystem mount context.
  852. */
  853. static int cramfs_init_fs_context(struct fs_context *fc)
  854. {
  855. fc->ops = &cramfs_context_ops;
  856. return 0;
  857. }
  858. static struct file_system_type cramfs_fs_type = {
  859. .owner = THIS_MODULE,
  860. .name = "cramfs",
  861. .init_fs_context = cramfs_init_fs_context,
  862. .kill_sb = cramfs_kill_sb,
  863. .fs_flags = FS_REQUIRES_DEV,
  864. };
  865. MODULE_ALIAS_FS("cramfs");
  866. static int __init init_cramfs_fs(void)
  867. {
  868. int rv;
  869. rv = cramfs_uncompress_init();
  870. if (rv < 0)
  871. return rv;
  872. rv = register_filesystem(&cramfs_fs_type);
  873. if (rv < 0)
  874. cramfs_uncompress_exit();
  875. return rv;
  876. }
  877. static void __exit exit_cramfs_fs(void)
  878. {
  879. cramfs_uncompress_exit();
  880. unregister_filesystem(&cramfs_fs_type);
  881. }
  882. module_init(init_cramfs_fs)
  883. module_exit(exit_cramfs_fs)
  884. MODULE_DESCRIPTION("Compressed ROM file system support");
  885. MODULE_LICENSE("GPL");