file.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/affs/file.c
  4. *
  5. * (c) 1996 Hans-Joachim Widmaier - Rewritten
  6. *
  7. * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
  8. *
  9. * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
  10. *
  11. * (C) 1991 Linus Torvalds - minix filesystem
  12. *
  13. * affs regular file handling primitives
  14. */
  15. #include <linux/uio.h>
  16. #include "affs.h"
  17. static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  18. static int
  19. affs_file_open(struct inode *inode, struct file *filp)
  20. {
  21. pr_debug("open(%lu,%d)\n",
  22. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  23. atomic_inc(&AFFS_I(inode)->i_opencnt);
  24. return 0;
  25. }
  26. static int
  27. affs_file_release(struct inode *inode, struct file *filp)
  28. {
  29. pr_debug("release(%lu, %d)\n",
  30. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  31. if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
  32. inode_lock(inode);
  33. if (inode->i_size != AFFS_I(inode)->mmu_private)
  34. affs_truncate(inode);
  35. affs_free_prealloc(inode);
  36. inode_unlock(inode);
  37. }
  38. return 0;
  39. }
  40. static int
  41. affs_grow_extcache(struct inode *inode, u32 lc_idx)
  42. {
  43. struct super_block *sb = inode->i_sb;
  44. struct buffer_head *bh;
  45. u32 lc_max;
  46. int i, j, key;
  47. if (!AFFS_I(inode)->i_lc) {
  48. char *ptr = (char *)get_zeroed_page(GFP_NOFS);
  49. if (!ptr)
  50. return -ENOMEM;
  51. AFFS_I(inode)->i_lc = (u32 *)ptr;
  52. AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
  53. }
  54. lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;
  55. if (AFFS_I(inode)->i_extcnt > lc_max) {
  56. u32 lc_shift, lc_mask, tmp, off;
  57. /* need to recalculate linear cache, start from old size */
  58. lc_shift = AFFS_I(inode)->i_lc_shift;
  59. tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
  60. for (; tmp; tmp >>= 1)
  61. lc_shift++;
  62. lc_mask = (1 << lc_shift) - 1;
  63. /* fix idx and old size to new shift */
  64. lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
  65. AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
  66. /* first shrink old cache to make more space */
  67. off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
  68. for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
  69. AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];
  70. AFFS_I(inode)->i_lc_shift = lc_shift;
  71. AFFS_I(inode)->i_lc_mask = lc_mask;
  72. }
  73. /* fill cache to the needed index */
  74. i = AFFS_I(inode)->i_lc_size;
  75. AFFS_I(inode)->i_lc_size = lc_idx + 1;
  76. for (; i <= lc_idx; i++) {
  77. if (!i) {
  78. AFFS_I(inode)->i_lc[0] = inode->i_ino;
  79. continue;
  80. }
  81. key = AFFS_I(inode)->i_lc[i - 1];
  82. j = AFFS_I(inode)->i_lc_mask + 1;
  83. // unlock cache
  84. for (; j > 0; j--) {
  85. bh = affs_bread(sb, key);
  86. if (!bh)
  87. goto err;
  88. key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
  89. affs_brelse(bh);
  90. }
  91. // lock cache
  92. AFFS_I(inode)->i_lc[i] = key;
  93. }
  94. return 0;
  95. err:
  96. // lock cache
  97. return -EIO;
  98. }
/*
 * Allocate and initialize a new extended (T_LIST) block for @inode and
 * chain it behind @bh, the current last extended block (or the file
 * header block).  @ext is the new block's extension index (unused here
 * beyond the caller's bookkeeping).  Returns the new block's buffer
 * (reference held) or ERR_PTR(-ENOSPC/-EIO).
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	/* allocate a new block, passing the predecessor as a goal hint */
	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	/* initialize the on-disk header/tail of the new extension block */
	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* link the new block into the predecessor's extension pointer,
	 * patching the predecessor's checksum for the changed word */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
/*
 * Return the buffer for extended block number @ext of @inode with an
 * extra reference held.  Fast path: the single-entry per-inode cache
 * (i_ext_last / i_ext_bh) hits; otherwise the slow path searches the
 * key caches or walks the on-disk chain.  May return an ERR_PTR.
 */
static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
/*
 * Slow path of affs_get_extblock(): locate extended block @ext, or
 * allocate it when @ext is exactly one past the current count.
 * Lookup order: the cached block's on-disk "extension" pointer (when
 * @ext follows the cached one), the linear cache, the associative
 * cache, then a walk of the on-disk chain from the nearest cached key.
 * The result replaces the single-entry cache (i_ext_bh/i_ext_last) and
 * is returned with an extra reference; ERR_PTR on failure.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache; after the loop above, tmp has
	 * been decremented down to the extension number that the linear
	 * cache entry for lc_idx corresponds to */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
/*
 * get_block callback for the generic buffer I/O helpers: map logical
 * file block @block of @inode into @bh_result.  With @create set and
 * @block exactly one past the last allocated block, a new data block
 * (and a new extended block, if its slot table is full) is allocated.
 * Returns 0 or a negative errno.
 */
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block	*sb = inode->i_sb;
	struct buffer_head	*ext_bh;
	u32			 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);
	BUG_ON(block > (sector_t)0x7fffffffUL);

	/* only appending a single block past the end is allowed; any
	 * farther request (or a plain read past the end) is an error */
	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	/* split into (extended block index, slot within that block) */
	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		/* checksum delta covers the changed slot and block_count */
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
  315. static int affs_writepage(struct page *page, struct writeback_control *wbc)
  316. {
  317. return block_write_full_page(page, affs_get_block, wbc);
  318. }
  319. static int affs_readpage(struct file *file, struct page *page)
  320. {
  321. return block_read_full_page(page, affs_get_block);
  322. }
  323. static void affs_write_failed(struct address_space *mapping, loff_t to)
  324. {
  325. struct inode *inode = mapping->host;
  326. if (to > inode->i_size) {
  327. truncate_pagecache(inode, inode->i_size);
  328. affs_truncate(inode);
  329. }
  330. }
  331. static ssize_t
  332. affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  333. {
  334. struct file *file = iocb->ki_filp;
  335. struct address_space *mapping = file->f_mapping;
  336. struct inode *inode = mapping->host;
  337. size_t count = iov_iter_count(iter);
  338. loff_t offset = iocb->ki_pos;
  339. ssize_t ret;
  340. if (iov_iter_rw(iter) == WRITE) {
  341. loff_t size = offset + count;
  342. if (AFFS_I(inode)->mmu_private < size)
  343. return 0;
  344. }
  345. ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
  346. if (ret < 0 && iov_iter_rw(iter) == WRITE)
  347. affs_write_failed(mapping, offset + count);
  348. return ret;
  349. }
  350. static int affs_write_begin(struct file *file, struct address_space *mapping,
  351. loff_t pos, unsigned len, unsigned flags,
  352. struct page **pagep, void **fsdata)
  353. {
  354. int ret;
  355. *pagep = NULL;
  356. ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  357. affs_get_block,
  358. &AFFS_I(mapping->host)->mmu_private);
  359. if (unlikely(ret))
  360. affs_write_failed(mapping, pos + len);
  361. return ret;
  362. }
  363. static int affs_write_end(struct file *file, struct address_space *mapping,
  364. loff_t pos, unsigned int len, unsigned int copied,
  365. struct page *page, void *fsdata)
  366. {
  367. struct inode *inode = mapping->host;
  368. int ret;
  369. ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
  370. /* Clear Archived bit on file writes, as AmigaOS would do */
  371. if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
  372. AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
  373. mark_inode_dirty(inode);
  374. }
  375. return ret;
  376. }
  377. static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
  378. {
  379. return generic_block_bmap(mapping,block,affs_get_block);
  380. }
/*
 * Address-space operations used when data blocks hold raw file data
 * (no per-block header), so the generic buffer helpers can do all the
 * work via affs_get_block().  Compare with affs_aops_ofs below.
 */
const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.write_begin = affs_write_begin,
	.write_end = affs_write_end,
	.direct_IO = affs_direct_IO,
	.bmap = _affs_bmap
};
  389. static inline struct buffer_head *
  390. affs_bread_ino(struct inode *inode, int block, int create)
  391. {
  392. struct buffer_head *bh, tmp_bh;
  393. int err;
  394. tmp_bh.b_state = 0;
  395. err = affs_get_block(inode, block, &tmp_bh, create);
  396. if (!err) {
  397. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  398. if (bh) {
  399. bh->b_state |= tmp_bh.b_state;
  400. return bh;
  401. }
  402. err = -EIO;
  403. }
  404. return ERR_PTR(err);
  405. }
  406. static inline struct buffer_head *
  407. affs_getzeroblk_ino(struct inode *inode, int block)
  408. {
  409. struct buffer_head *bh, tmp_bh;
  410. int err;
  411. tmp_bh.b_state = 0;
  412. err = affs_get_block(inode, block, &tmp_bh, 1);
  413. if (!err) {
  414. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  415. if (bh) {
  416. bh->b_state |= tmp_bh.b_state;
  417. return bh;
  418. }
  419. err = -EIO;
  420. }
  421. return ERR_PTR(err);
  422. }
  423. static inline struct buffer_head *
  424. affs_getemptyblk_ino(struct inode *inode, int block)
  425. {
  426. struct buffer_head *bh, tmp_bh;
  427. int err;
  428. tmp_bh.b_state = 0;
  429. err = affs_get_block(inode, block, &tmp_bh, 1);
  430. if (!err) {
  431. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  432. if (bh) {
  433. bh->b_state |= tmp_bh.b_state;
  434. return bh;
  435. }
  436. err = -EIO;
  437. }
  438. return ERR_PTR(err);
  439. }
/*
 * Fill @page with file data up to byte offset @to by copying out of
 * each OFS data block (those carry an on-disk header, so the generic
 * buffer helpers cannot be used).  @create is passed through to the
 * block lookup so the write path can allocate missing blocks.
 * Returns 0 or a negative errno.
 */
static int
affs_do_readpage_ofs(struct page *page, unsigned to, int create)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	unsigned pos = 0;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
		 page->index, to);
	BUG_ON(to > PAGE_SIZE);
	bsize = AFFS_SB(sb)->s_data_blksize;

	/* starting data block index and offset within it */
	tmp = page->index << PAGE_SHIFT;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		/* copy the rest of this block, capped at the request end */
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		data = kmap_atomic(page);
		memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
		kunmap_atomic(data);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;	/* only the first block starts mid-block */
	}
	flush_dcache_page(page);
	return 0;
}
/*
 * Grow an OFS file to @newsize bytes: zero-fill the tail of the
 * current partial last block, then append fresh zeroed data blocks,
 * initializing each block's header and linking it into the on-disk
 * "next" chain.  Returns 0 or a negative errno.  Note that i_size and
 * mmu_private are set to @newsize even on the error path (see the XXX
 * comment in affs_write_begin_ofs()).
 */
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		/* zero-fill the remainder of the partial last block */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the current last block around so the first new
		 * block can be linked behind it */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		/* initialize the new data block's header */
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			/* link the new block behind its predecessor */
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
  540. static int
  541. affs_readpage_ofs(struct file *file, struct page *page)
  542. {
  543. struct inode *inode = page->mapping->host;
  544. u32 to;
  545. int err;
  546. pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
  547. to = PAGE_SIZE;
  548. if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
  549. to = inode->i_size & ~PAGE_MASK;
  550. memset(page_address(page) + to, 0, PAGE_SIZE - to);
  551. }
  552. err = affs_do_readpage_ofs(page, to, 0);
  553. if (!err)
  554. SetPageUptodate(page);
  555. unlock_page(page);
  556. return err;
  557. }
/*
 * write_begin for OFS files: extend the file with zeroes if @pos lies
 * past the current end of data, then grab the page and, unless it is
 * already uptodate, populate it fully so a short copy by the caller
 * cannot expose stale data.  On failure the page is unlocked and
 * released.  Returns 0 or a negative errno.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
	if (err) {
		unlock_page(page);
		put_page(page);
	}
	return err;
}
/*
 * write_end for OFS files: copy the written page range back into the
 * headered data blocks (generic_write_end() cannot be used because of
 * the per-block headers).  Three phases: a leading partial block, full
 * blocks, and a trailing partial block; new blocks are initialized and
 * linked into the on-disk "next" chain.  Returns the number of bytes
 * written, or a negative errno if nothing was written.  Unlocks and
 * releases the page in all cases.
 */
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	/* starting data block index and offset within it */
	tmp = (page->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		/* phase 1: fill up the existing partial block */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the previous block so new ones can be chained */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}

	/* phase 2: whole data blocks */
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			/* freshly allocated: set up header and chain it */
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}

	/* phase 3: trailing partial block */
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	/* advance i_size/mmu_private if the write extended the file */
	tmp = (page->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

	/* Clear Archived bit on file writes, as AmigaOS would do */
	if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
		AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
		mark_inode_dirty(inode);
	}

err_first_bh:
	unlock_page(page);
	put_page(page);

	return written;

err_bh:
	/* partial success: report bytes written so far, or the error if
	 * nothing was copied yet */
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}
/*
 * Address-space operations for OFS files, whose data blocks carry an
 * on-disk header (see AFFS_DATA_HEAD usage above) and therefore need
 * manual copying instead of the generic buffer helpers.  No writepage
 * is provided for this variant.
 */
const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};
/* Free any preallocated blocks. */
void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	/* Give the i_pa_cnt blocks following i_lastalloc back to the
	 * allocator one by one.  NOTE(review): this relies on preallocated
	 * blocks being contiguous after i_lastalloc -- confirm against
	 * affs_alloc_block()'s preallocation scheme. */
	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}
/* Truncate (or enlarge) a file to the requested size. */
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	/* index of the last remaining data block and of the extended
	 * block that maps it */
	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		/* enlarging: let write_begin/write_end allocate the new
		 * blocks by doing a zero-length write at the new EOF */
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	/* shrinking from here on */
	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	/* free the no-longer-needed slots of the last extended block */
	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			/* OFS: terminate the data-block "next" chain at
			 * the new last block */
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	/* free all following extended blocks and the data blocks they map */
	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
  855. int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
  856. {
  857. struct inode *inode = filp->f_mapping->host;
  858. int ret, err;
  859. err = file_write_and_wait_range(filp, start, end);
  860. if (err)
  861. return err;
  862. inode_lock(inode);
  863. ret = write_inode_now(inode, 0);
  864. err = sync_blockdev(inode->i_sb->s_bdev);
  865. if (!ret)
  866. ret = err;
  867. inode_unlock(inode);
  868. return ret;
  869. }
/*
 * Regular-file operations: generic VFS helpers throughout, plus the
 * AFFS open/release hooks that maintain the opener count used for
 * deferred truncation and preallocation cleanup.
 */
const struct file_operations affs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.open = affs_file_open,
	.release = affs_file_release,
	.fsync = affs_file_fsync,
	.splice_read = generic_file_splice_read,
};
/* Inode operations for regular files: only attribute changes need
 * AFFS-specific handling. */
const struct inode_operations affs_file_inode_operations = {
	.setattr = affs_notify_change,
};