  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/affs/file.c
  4. *
  5. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  6. *
  7. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  8. *
  9. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  10. *
  11. * (C) 1991 Linus Torvalds - minix filesystem
  12. *
  13. * affs regular file handling primitives
  14. */
  15. #include <linux/uio.h>
  16. #include <linux/blkdev.h>
  17. #include <linux/mpage.h>
  18. #include "affs.h"
  19. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  20. static int
  21. affs_file_open(struct inode *inode, struct file *filp)
  22. {
  23. pr_debug("open(%lu,%d)\n",
  24. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  25. atomic_inc(&AFFS_I(inode)->i_opencnt);
  26. return 0;
  27. }
  28. static int
  29. affs_file_release(struct inode *inode, struct file *filp)
  30. {
  31. pr_debug("release(%lu, %d)\n",
  32. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  33. if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
  34. inode_lock(inode);
  35. if (inode->i_size != AFFS_I(inode)->mmu_private)
  36. affs_truncate(inode);
  37. affs_free_prealloc(inode);
  38. inode_unlock(inode);
  39. }
  40. return 0;
  41. }
/*
 * Make sure the linear extension-block cache covers index @lc_idx.
 *
 * The cache page (allocated lazily here) is split in two: the first half
 * is the linear cache i_lc (every 2^i_lc_shift'th extension key), the
 * second half the associative cache i_ac.  When the file has more
 * extension blocks than the current step size can address, the step is
 * doubled until it fits, then the missing slots are filled by walking
 * the on-disk extension chain.
 *
 * Returns 0 on success, -ENOMEM if the cache page cannot be allocated,
 * or -EIO if walking the extension chain fails.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		/* First use: one zeroed page holds both caches. */
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		/* NOTE(review): this compacts i_ac, but the comment and the
		 * surrounding lc_* arithmetic are about the linear cache —
		 * verify whether i_lc was intended here. */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* Slot 0 is the file header block itself. */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		/* Walk one cache step (i_lc_mask + 1 links) along the
		 * on-disk extension chain from the previous slot. */
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialize a new extension (T_LIST) block for @inode and
 * chain it behind @bh, the file's current last extension block (or the
 * file header).  Returns the new buffer_head or an ERR_PTR on failure.
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	/* Allocate with the predecessor's block number as the goal. */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* Fill in the on-disk header/tail of the new extension block. */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* Link it from the predecessor; warn if a link already existed. */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
  131. static inline struct buffer_head *
  132. affs_get_extblock(struct inode *inode, u32 ext)
  133. {
  134. /* inline the simplest case: same extended block as last time */
  135. struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
  136. if (ext == AFFS_I(inode)->i_ext_last)
  137. get_bh(bh);
  138. else
  139. /* we have to do more (not inlined) */
  140. bh = affs_get_extblock_slow(inode, ext);
  141. return bh;
  142. }
/*
 * Slow path of affs_get_extblock(): locate extension block @ext of
 * @inode when it is not the one cached in i_ext_bh.  Handles, in order:
 * sequential advance from the cached block (allocating a new one at
 * EOF), seeking back to the file header, allocation past the current
 * extension count, and finally lookup via the linear/associative caches
 * with a fallback walk along the on-disk chain.  The result replaces
 * the cached block.  Returns the buffer_head or an ERR_PTR.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		/* One past the last extension block: grow the file. */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
/*
 * ->get_block for FFS files: map file block @block of @inode into
 * @bh_result.  With @create set, a single new block may be appended at
 * the current end of file; requests further past EOF are rejected.
 * Returns 0 or a negative errno.
 */
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);
	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		/* Only appending exactly one block at EOF is supported. */
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	/* Split the file block into (extension block, slot within it). */
	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		/* Checksum delta covers both the slot and block_count. */
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
/* ->writepages: write back dirty folios via the generic mpage path. */
static int affs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, affs_get_block);
}
/* ->read_folio: fill one folio using the generic block mapping path. */
static int affs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, affs_get_block);
}
  326. static void affs_write_failed(struct address_space *mapping, loff_t to)
  327. {
  328. struct inode *inode = mapping->host;
  329. if (to > inode->i_size) {
  330. truncate_pagecache(inode, inode->i_size);
  331. affs_truncate(inode);
  332. }
  333. }
  334. static ssize_t
  335. affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  336. {
  337. struct file *file = iocb->ki_filp;
  338. struct address_space *mapping = file->f_mapping;
  339. struct inode *inode = mapping->host;
  340. size_t count = iov_iter_count(iter);
  341. loff_t offset = iocb->ki_pos;
  342. ssize_t ret;
  343. if (iov_iter_rw(iter) == WRITE) {
  344. loff_t size = offset + count;
  345. if (AFFS_I(inode)->mmu_private < size)
  346. return 0;
  347. }
  348. ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
  349. if (ret < 0 && iov_iter_rw(iter) == WRITE)
  350. affs_write_failed(mapping, offset + count);
  351. return ret;
  352. }
  353. static int affs_write_begin(struct file *file, struct address_space *mapping,
  354. loff_t pos, unsigned len,
  355. struct folio **foliop, void **fsdata)
  356. {
  357. int ret;
  358. ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
  359. affs_get_block,
  360. &AFFS_I(mapping->host)->mmu_private);
  361. if (unlikely(ret))
  362. affs_write_failed(mapping, pos + len);
  363. return ret;
  364. }
  365. static int affs_write_end(struct file *file, struct address_space *mapping,
  366. loff_t pos, unsigned int len, unsigned int copied,
  367. struct folio *folio, void *fsdata)
  368. {
  369. struct inode *inode = mapping->host;
  370. int ret;
  371. ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata);
  372. /* Clear Archived bit on file writes, as AmigaOS would do */
  373. if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
  374. AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
  375. mark_inode_dirty(inode);
  376. }
  377. return ret;
  378. }
  379. static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
  380. {
  381. return generic_block_bmap(mapping,block,affs_get_block);
  382. }
/*
 * Address-space operations for files without per-data-block headers:
 * these can use the generic block/mpage paths directly.
 */
const struct address_space_operations affs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio,
	.writepages = affs_writepages,
	.write_begin = affs_write_begin,
	.write_end = affs_write_end,
	.direct_IO = affs_direct_IO,
	.migrate_folio = buffer_migrate_folio,
	.bmap = _affs_bmap
};
  394. static inline struct buffer_head *
  395. affs_bread_ino(struct inode *inode, int block, int create)
  396. {
  397. struct buffer_head *bh, tmp_bh;
  398. int err;
  399. tmp_bh.b_state = 0;
  400. err = affs_get_block(inode, block, &tmp_bh, create);
  401. if (!err) {
  402. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  403. if (bh) {
  404. bh->b_state |= tmp_bh.b_state;
  405. return bh;
  406. }
  407. err = -EIO;
  408. }
  409. return ERR_PTR(err);
  410. }
  411. static inline struct buffer_head *
  412. affs_getzeroblk_ino(struct inode *inode, int block)
  413. {
  414. struct buffer_head *bh, tmp_bh;
  415. int err;
  416. tmp_bh.b_state = 0;
  417. err = affs_get_block(inode, block, &tmp_bh, 1);
  418. if (!err) {
  419. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  420. if (bh) {
  421. bh->b_state |= tmp_bh.b_state;
  422. return bh;
  423. }
  424. err = -EIO;
  425. }
  426. return ERR_PTR(err);
  427. }
  428. static inline struct buffer_head *
  429. affs_getemptyblk_ino(struct inode *inode, int block)
  430. {
  431. struct buffer_head *bh, tmp_bh;
  432. int err;
  433. tmp_bh.b_state = 0;
  434. err = affs_get_block(inode, block, &tmp_bh, 1);
  435. if (!err) {
  436. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  437. if (bh) {
  438. bh->b_state |= tmp_bh.b_state;
  439. return bh;
  440. }
  441. err = -EIO;
  442. }
  443. return ERR_PTR(err);
  444. }
/*
 * Copy the first @to bytes of @folio from the file's OFS data blocks.
 * OFS blocks carry their own header, so the page cache cannot be mapped
 * block-for-block; each block's payload is copied individually.  With
 * @create set, missing blocks are allocated.  Returns 0 or a negative
 * errno.
 */
static int affs_do_read_folio_ofs(struct folio *folio, size_t to, int create)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	size_t pos = 0;
	size_t bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %zu)\n", __func__, inode->i_ino,
		 folio->index, to);
	BUG_ON(to > folio_size(folio));

	bsize = AFFS_SB(sb)->s_data_blksize;
	/* NOTE(review): folio_pos() is loff_t; storing it in a u32
	 * limits this to offsets below 4 GiB — presumably acceptable
	 * for AFFS, but verify. */
	tmp = folio_pos(folio);
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		memcpy_to_folio(folio, pos, AFFS_DATA(bh) + boff, tmp);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		/* Only the first block can start at a non-zero offset. */
		boff = 0;
	}
	return 0;
}
  474. static int
  475. affs_extent_file_ofs(struct inode *inode, u32 newsize)
  476. {
  477. struct super_block *sb = inode->i_sb;
  478. struct buffer_head *bh, *prev_bh;
  479. u32 bidx, boff;
  480. u32 size, bsize;
  481. u32 tmp;
  482. pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
  483. bsize = AFFS_SB(sb)->s_data_blksize;
  484. bh = NULL;
  485. size = AFFS_I(inode)->mmu_private;
  486. bidx = size / bsize;
  487. boff = size % bsize;
  488. if (boff) {
  489. bh = affs_bread_ino(inode, bidx, 0);
  490. if (IS_ERR(bh))
  491. return PTR_ERR(bh);
  492. tmp = min(bsize - boff, newsize - size);
  493. BUG_ON(boff + tmp > bsize || tmp > bsize);
  494. memset(AFFS_DATA(bh) + boff, 0, tmp);
  495. be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
  496. affs_fix_checksum(sb, bh);
  497. mark_buffer_dirty_inode(bh, inode);
  498. size += tmp;
  499. bidx++;
  500. } else if (bidx) {
  501. bh = affs_bread_ino(inode, bidx - 1, 0);
  502. if (IS_ERR(bh))
  503. return PTR_ERR(bh);
  504. }
  505. while (size < newsize) {
  506. prev_bh = bh;
  507. bh = affs_getzeroblk_ino(inode, bidx);
  508. if (IS_ERR(bh))
  509. goto out;
  510. tmp = min(bsize, newsize - size);
  511. BUG_ON(tmp > bsize);
  512. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  513. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  514. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
  515. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  516. affs_fix_checksum(sb, bh);
  517. bh->b_state &= ~(1UL << BH_New);
  518. mark_buffer_dirty_inode(bh, inode);
  519. if (prev_bh) {
  520. u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  521. if (tmp_next)
  522. affs_warning(sb, "extent_file_ofs",
  523. "next block already set for %d (%d)",
  524. bidx, tmp_next);
  525. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  526. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
  527. mark_buffer_dirty_inode(prev_bh, inode);
  528. affs_brelse(prev_bh);
  529. }
  530. size += bsize;
  531. bidx++;
  532. }
  533. affs_brelse(bh);
  534. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  535. return 0;
  536. out:
  537. inode->i_size = AFFS_I(inode)->mmu_private = newsize;
  538. return PTR_ERR(bh);
  539. }
  540. static int affs_read_folio_ofs(struct file *file, struct folio *folio)
  541. {
  542. struct inode *inode = folio->mapping->host;
  543. size_t to;
  544. int err;
  545. pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, folio->index);
  546. to = folio_size(folio);
  547. if (folio_pos(folio) + to > inode->i_size) {
  548. to = inode->i_size - folio_pos(folio);
  549. folio_zero_segment(folio, to, folio_size(folio));
  550. }
  551. err = affs_do_read_folio_ofs(folio, to, 0);
  552. if (!err)
  553. folio_mark_uptodate(folio);
  554. folio_unlock(folio);
  555. return err;
  556. }
/*
 * ->write_begin for OFS files.  If the write starts beyond the bytes
 * already allocated (mmu_private), the file is extended first; then the
 * folio is pinned and brought fully uptodate by reading (or allocating)
 * the underlying OFS data blocks.  On success *foliop holds a locked
 * folio reference; on failure the folio is unlocked and released.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len,
				struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct folio *folio;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;

	if (folio_test_uptodate(folio))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_read_folio_ofs(folio, folio_size(folio), 1);
	if (err) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return err;
}
  591. static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
  592. loff_t pos, unsigned len, unsigned copied,
  593. struct folio *folio, void *fsdata)
  594. {
  595. struct inode *inode = mapping->host;
  596. struct super_block *sb = inode->i_sb;
  597. struct buffer_head *bh, *prev_bh;
  598. char *data;
  599. u32 bidx, boff, bsize;
  600. unsigned from, to;
  601. u32 tmp;
  602. int written;
  603. from = pos & (PAGE_SIZE - 1);
  604. to = from + len;
  605. /*
  606. * XXX: not sure if this can handle short copies (len < copied), but
  607. * we don't have to, because the folio should always be uptodate here,
  608. * due to write_begin.
  609. */
  610. pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
  611. pos + len);
  612. bsize = AFFS_SB(sb)->s_data_blksize;
  613. data = folio_address(folio);
  614. bh = NULL;
  615. written = 0;
  616. tmp = (folio->index << PAGE_SHIFT) + from;
  617. bidx = tmp / bsize;
  618. boff = tmp % bsize;
  619. if (boff) {
  620. bh = affs_bread_ino(inode, bidx, 0);
  621. if (IS_ERR(bh)) {
  622. written = PTR_ERR(bh);
  623. goto err_first_bh;
  624. }
  625. tmp = min(bsize - boff, to - from);
  626. BUG_ON(boff + tmp > bsize || tmp > bsize);
  627. memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
  628. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
  629. max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
  630. affs_fix_checksum(sb, bh);
  631. mark_buffer_dirty_inode(bh, inode);
  632. written += tmp;
  633. from += tmp;
  634. bidx++;
  635. } else if (bidx) {
  636. bh = affs_bread_ino(inode, bidx - 1, 0);
  637. if (IS_ERR(bh)) {
  638. written = PTR_ERR(bh);
  639. goto err_first_bh;
  640. }
  641. }
  642. while (from + bsize <= to) {
  643. prev_bh = bh;
  644. bh = affs_getemptyblk_ino(inode, bidx);
  645. if (IS_ERR(bh))
  646. goto err_bh;
  647. memcpy(AFFS_DATA(bh), data + from, bsize);
  648. if (buffer_new(bh)) {
  649. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  650. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  651. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
  652. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
  653. AFFS_DATA_HEAD(bh)->next = 0;
  654. bh->b_state &= ~(1UL << BH_New);
  655. if (prev_bh) {
  656. u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  657. if (tmp_next)
  658. affs_warning(sb, "commit_write_ofs",
  659. "next block already set for %d (%d)",
  660. bidx, tmp_next);
  661. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  662. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
  663. mark_buffer_dirty_inode(prev_bh, inode);
  664. }
  665. }
  666. affs_brelse(prev_bh);
  667. affs_fix_checksum(sb, bh);
  668. mark_buffer_dirty_inode(bh, inode);
  669. written += bsize;
  670. from += bsize;
  671. bidx++;
  672. }
  673. if (from < to) {
  674. prev_bh = bh;
  675. bh = affs_bread_ino(inode, bidx, 1);
  676. if (IS_ERR(bh))
  677. goto err_bh;
  678. tmp = min(bsize, to - from);
  679. BUG_ON(tmp > bsize);
  680. memcpy(AFFS_DATA(bh), data + from, tmp);
  681. if (buffer_new(bh)) {
  682. AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
  683. AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
  684. AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
  685. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  686. AFFS_DATA_HEAD(bh)->next = 0;
  687. bh->b_state &= ~(1UL << BH_New);
  688. if (prev_bh) {
  689. u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
  690. if (tmp_next)
  691. affs_warning(sb, "commit_write_ofs",
  692. "next block already set for %d (%d)",
  693. bidx, tmp_next);
  694. AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
  695. affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
  696. mark_buffer_dirty_inode(prev_bh, inode);
  697. }
  698. } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
  699. AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
  700. affs_brelse(prev_bh);
  701. affs_fix_checksum(sb, bh);
  702. mark_buffer_dirty_inode(bh, inode);
  703. written += tmp;
  704. from += tmp;
  705. bidx++;
  706. }
  707. folio_mark_uptodate(folio);
  708. done:
  709. affs_brelse(bh);
  710. tmp = (folio->index << PAGE_SHIFT) + from;
  711. if (tmp > inode->i_size)
  712. inode->i_size = AFFS_I(inode)->mmu_private = tmp;
  713. /* Clear Archived bit on file writes, as AmigaOS would do */
  714. if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
  715. AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
  716. mark_inode_dirty(inode);
  717. }
  718. err_first_bh:
  719. folio_unlock(folio);
  720. folio_put(folio);
  721. return written;
  722. err_bh:
  723. bh = prev_bh;
  724. if (!written)
  725. written = PTR_ERR(bh);
  726. goto done;
  727. }
/*
 * Address-space operations for OFS files.  OFS data blocks carry their
 * own header (see AFFS_DATA_HEAD usage above), so data must be copied
 * per block via the *_ofs helpers rather than mapped directly.
 */
const struct address_space_operations affs_aops_ofs = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = affs_read_folio_ofs,
	//.writepages = affs_writepages_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs,
	.migrate_folio = filemap_migrate_folio,
};
  737. /* Free any preallocated blocks. */
  738. void
  739. affs_free_prealloc(struct inode *inode)
  740. {
  741. struct super_block *sb = inode->i_sb;
  742. pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);
  743. while (AFFS_I(inode)->i_pa_cnt) {
  744. AFFS_I(inode)->i_pa_cnt--;
  745. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  746. }
  747. }
/* Truncate (or enlarge) a file to the requested size. */
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	/* Last data block and its extension block under the new size. */
	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		/* Growing: a zero-length write at the new size lets
		 * write_begin/write_end perform the allocation. */
		struct address_space *mapping = inode->i_mapping;
		struct folio *folio;
		void *fsdata = NULL;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	/* Shrinking from here on. */
	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	/* Free the now-unused block slots of the last kept extension
	 * block and cut its link to further extension blocks. */
	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			/* OFS: terminate the data-block chain at the new
			 * last block. */
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	/* Walk and free the remaining extension blocks and the data
	 * blocks they reference. */
	while (ext_key) {
		/* NOTE(review): affs_bread() can return NULL on I/O
		 * error; the dereferences below would then oops —
		 * verify whether a NULL check/bail-out is needed. */
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
  859. int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
  860. {
  861. struct inode *inode = filp->f_mapping->host;
  862. int ret, err;
  863. err = file_write_and_wait_range(filp, start, end);
  864. if (err)
  865. return err;
  866. inode_lock(inode);
  867. ret = write_inode_now(inode, 0);
  868. err = sync_blockdev(inode->i_sb->s_bdev);
  869. if (!ret)
  870. ret = err;
  871. inode_unlock(inode);
  872. return ret;
  873. }
/* Regular-file operations: generic VFS paths plus AFFS open/release
 * accounting and fsync. */
const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= filemap_splice_read,
};
/* Inode operations for regular AFFS files. */
const struct inode_operations affs_file_inode_operations = {
	.setattr	= affs_notify_change,
};