/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

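/**
 * gfs2_ail_error - report an AIL buffer in an unexpected state and withdraw
 * @gl: the glock the buffer belongs to
 * @bh: the buffer head found in a bad state
 */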
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

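/**
 * gfs2_ail_empty_gl - revoke all of the AIL buffers attached to a glock
 * @gl: the glock
 *
 * Builds a transaction on the stack (a shortened form of what
 * gfs2_trans_begin() would set up) to cover the revokes, flushes the
 * AIL list and then flushes the log.
 */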
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin();
	 * tr->alloced is not set since the transaction structure is
	 * on the stack. */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
}

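/**
 * gfs2_ail_flush - revoke the AIL buffers attached to a glock
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 */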
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize -
				    sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

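	/*
	 * Make sure the reservation covers all outstanding revokes: the
	 * first revoke block is a log descriptor, while each further block
	 * only loses a metadata header, so grow max_revokes one block's
	 * worth of u64 entries at a time until it covers them all.
	 */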
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

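/**
 * gfs2_glock2inode - look up the inode attached to a glock
 * @gl: the glock
 *
 * Returns the attached inode, if any, with GIF_GLOP_PENDING set under
 * the glock spinlock; paired with gfs2_clear_glop_pending() once the
 * glock operation has completed.
 */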
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

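/**
 * gfs2_glock2rgrp - look up the resource group attached to a glock
 * @gl: the glock
 *
 * Returns: the resource group, or NULL if none is attached
 */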
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

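/**
 * gfs2_clear_glop_pending - signal that a glock operation has finished
 * @ip: the inode, or NULL
 *
 * Clears GIF_GLOP_PENDING and wakes up anybody waiting on the bit.
 */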
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

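/**
 * gfs2_dinode_in - copy an on-disk dinode into an incore inode
 * @ip: the incore GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is corrupt
 */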
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n",
			       error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head-of-the-log pointers */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if the demote request came from another cluster node
 *
 * gl->gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

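	/*
	 * If another node wants this iopen glock unlocked while we still
	 * hold it shared and the inode is still attached, queue the delete
	 * work, holding an extra glock reference for the work item; drop
	 * the reference again if the work was already queued.
	 */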
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

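/* Glock operations, indexed by lock type */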
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};