// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

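/**
 * gfs2_page_add_databufs - Add journaled data buffers for a range of a page
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the start of the range within the page
 * @len: Length of the range in bytes
 *
 * Marks each buffer head that overlaps the byte range uptodate and adds
 * it to the current transaction as journaled data.
 */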
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

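/*
 * __gfs2_readpage - Read a single page of a file
 *
 * Non-jdata mappings, and block-size == page-size pages without buffer
 * heads, are read via iomap; stuffed jdata files are copied out of the
 * inode block; anything else falls back to mpage_readpage().  Returns
 * -EIO if the filesystem has been withdrawn in the meantime.
 */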
static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno on failure
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		/* Clamp the copy to what remains of the current page. */
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		; /* nothing to do: readpage copies stuffed files (note 2) */
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	/*
	 * Pages dirtied from within a transaction still need to be added
	 * to the journal at writeback time; PageChecked records that.
	 */
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

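/*
 * gfs2_discard - Forget a buffer's journal state before invalidating it
 * @sdp: The superblock
 * @bh: The buffer head being discarded
 *
 * Cleans the buffer and unlinks it from the journal lists (or the ail,
 * for journaled data) so the page it sits on can be invalidated.
 */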
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

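/**
 * gfs2_invalidatepage - Invalidate (part of) a page
 * @page: The page to invalidate
 * @offset: Offset of the first byte to invalidate
 * @length: Length of the range in bytes
 *
 * Discards the journal state of every buffer that lies entirely inside
 * the range.  PageChecked is only cleared, and the page only released,
 * when the whole page is being invalidated.
 */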
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

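/*
 * Address space operations for regular (non-jdata) files.  Most entries
 * delegate to the generic iomap implementations.
 */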
static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = iomap_set_page_dirty,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

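/*
 * Journaled data files keep buffer heads for their pages, so they use
 * the buffer-head based operations rather than the iomap ones.
 */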
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

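/*
 * Select the address space operations for an inode, depending on
 * whether its data is journaled.
 */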
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000819}