// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_error.h"

#include <linux/iversion.h>

kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */

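/* Convert a generic log item back to the inode log item that embeds it. */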
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}

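/*
 * Account for the log iovecs and bytes that logging the inode data fork
 * will require, based on the fork format and which regions are marked
 * dirty in the log item.
 */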
STATIC void
xfs_inode_item_data_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			/* worst case, doesn't subtract delalloc extents */
			*nbytes += XFS_IFORK_DSIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			*nbytes += ip->i_df.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			*nbytes += roundup(ip->i_df.if_bytes, 4);
			*nvecs += 1;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		break;
	default:
		ASSERT(0);
		break;
	}
}

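/*
 * As above, but account for the log iovecs and bytes needed to log the
 * dirty regions of the attribute fork.
 */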
STATIC void
xfs_inode_item_attr_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			/* worst case, doesn't subtract unused space */
			*nbytes += XFS_IFORK_ASIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			*nbytes += ip->i_afp->if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			*nbytes += roundup(ip->i_afp->if_bytes, 4);
			*nvecs += 1;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	if (XFS_IFORK_Q(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}

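/*
 * Copy the dirty parts of the data fork into the log vector, and prune
 * the logging flags down to those that make sense for the current fork
 * format so the log format structure reflects exactly what was logged.
 */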
STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data, data_bytes);
			ilf->ilf_dsize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}

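/*
 * As for the data fork above: copy the dirty parts of the attribute fork
 * into the log vector and prune the logging flags to match the fork
 * format.
 */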
STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(ip->i_afp) ==
				ip->i_afp->if_nextents);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_afp->if_broot,
					ip->i_afp->if_broot_bytes);
			ilf->ilf_asize = ip->i_afp->if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT(ip->i_afp->if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_afp->if_u1.if_data,
					data_bytes);
			ilf->ilf_asize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Convert an incore timestamp to a log timestamp. Note that the log format
 * specifies host endian format!
 */
static inline xfs_ictimestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_ictimestamp	*lits;
	xfs_ictimestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	lits = (struct xfs_legacy_ictimestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}

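/*
 * Fill in the log dinode from the incore inode. Ownership, link count and
 * timestamp information lives in the VFS inode, so it is pulled from there
 * rather than from the xfs_icdinode.
 */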
static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	to->di_projid_lo = from->di_projid & 0xffff;
	to->di_projid_hi = from->di_projid >> 16;

	memset(to->di_pad, 0, sizeof(to->di_pad));
	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = from->di_size;
	to->di_nblocks = from->di_nblocks;
	to->di_extsize = from->di_extsize;
	to->di_nextents = xfs_ifork_nextents(&ip->i_df);
	to->di_anextents = xfs_ifork_nextents(ip->i_afp);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_dmevmask = from->di_dmevmask;
	to->di_dmstate = from->di_dmstate;
	to->di_flags = from->di_flags;

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, from->di_crtime);
		to->di_flags2 = from->di_flags2;
		to->di_cowextsize = from->di_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = from->di_flushiter;
	}
}

/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}

/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item. It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * for not using and initialising the properly padded structure all the time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(lip->li_buf);

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}


/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 *
 * Note that unpin can race with inode cluster buffer freeing, which marks
 * the buffer stale. In that case, flush completions are run from the buffer
 * unpin call, which may happen before the inode is unpinned. If we lose the
 * race, there will be no buffer attached to the log item, but the inode
 * will be marked XFS_ISTALE.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

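/*
 * The inode is written back via its backing cluster buffer, so pushing the
 * inode log item means flushing the inode into that buffer and then
 * queueing the buffer for delwri submission.
 */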
STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	ASSERT(iip->ili_item.li_buf);

	if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
	    (ip->i_flags & XFS_ISTALE))
		return XFS_ITEM_PINNED;

	if (xfs_iflags_test(ip, XFS_IFLUSHING))
		return XFS_ITEM_FLUSHING;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	spin_unlock(&lip->li_ailp->ail_lock);

	/*
	 * We need to hold a reference for flushing the cluster buffer as it
	 * may fail the buffer without IO submission. In that case we still
	 * need a reference for the completion, because otherwise we don't
	 * take a reference for IO until the buffer is queued for delwri
	 * submission.
	 */
	xfs_buf_hold(bp);
	error = xfs_iflush_cluster(bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	} else {
		/*
		 * Release the buffer if we were unable to flush anything. On
		 * any other error, the buffer has already been released.
		 */
		if (error == -EAGAIN)
			xfs_buf_relse(bp);
		rval = XFS_ITEM_LOCKED;
	}

	spin_lock(&lip->li_ailp->ail_lock);
	return rval;
}

/*
 * Unlock the inode associated with the inode log item.
 */
STATIC void
xfs_inode_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn. Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters. Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO to complete before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed, which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

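/*
 * Record the commit lsn of the transaction so that a later fsync can tell
 * how far the log needs to be forced, then release the inode as for a
 * normal commit.
 */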
STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	INODE_ITEM(lip)->ili_last_lsn = commit_lsn;
	return xfs_inode_item_release(lip);
}

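/*
 * This is the ops vector shared by all inode log items.
 */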
static const struct xfs_item_ops xfs_inode_item_ops = {
	.iop_size	= xfs_inode_item_size,
	.iop_format	= xfs_inode_item_format,
	.iop_pin	= xfs_inode_item_pin,
	.iop_unpin	= xfs_inode_item_unpin,
	.iop_release	= xfs_inode_item_release,
	.iop_committed	= xfs_inode_item_committed,
	.iop_push	= xfs_inode_item_push,
	.iop_committing	= xfs_inode_item_committing,
};


/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
	struct xfs_inode	*ip,
	struct xfs_mount	*mp)
{
	struct xfs_inode_log_item *iip;

	ASSERT(ip->i_itemp == NULL);
	iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone,
					      GFP_KERNEL | __GFP_NOFAIL);

	iip->ili_inode = ip;
	spin_lock_init(&iip->ili_lock);
	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
						&xfs_inode_item_ops);
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;

	ASSERT(iip->ili_item.li_buf == NULL);

	ip->i_itemp = NULL;
	kmem_free(iip->ili_item.li_lv_shadow);
	kmem_cache_free(xfs_ili_zone, iip);
}


/*
 * We only want to pull the item from the AIL if it is actually there
 * and its location in the log has not changed since we started the
 * flush. Thus, we only bother if the inode's lsn has not changed.
 */
static void
xfs_iflush_ail_updates(
	struct xfs_ail		*ailp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn = 0;

	/* this is an opencoded batch version of xfs_trans_ail_delete */
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, list, li_bio_list) {
		xfs_lsn_t	lsn;

		clear_bit(XFS_LI_FAILED, &lip->li_flags);
		if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn)
			continue;

		lsn = xfs_ail_delete_one(ailp, lip);
		if (!tail_lsn && lsn)
			tail_lsn = lsn;
	}
	xfs_ail_update_finish(ailp, tail_lsn);
}

/*
 * Walk the list of inodes that have completed their IOs. If they are clean
 * remove them from the list and dissociate them from the buffer. Buffers that
 * are still dirty remain linked to the buffer and on the list. Caller must
 * handle them appropriately.
 */
static void
xfs_iflush_finish(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);
		bool	drop_buffer = false;

		spin_lock(&iip->ili_lock);

		/*
		 * Remove the reference to the cluster buffer if the inode is
		 * clean in memory and drop the buffer reference once we've
		 * dropped the locks we hold.
		 */
		ASSERT(iip->ili_item.li_buf == bp);
		if (!iip->ili_fields) {
			iip->ili_item.li_buf = NULL;
			list_del_init(&lip->li_bio_list);
			drop_buffer = true;
		}
		iip->ili_last_fields = 0;
		iip->ili_flush_lsn = 0;
		spin_unlock(&iip->ili_lock);
		xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
		if (drop_buffer)
			xfs_buf_rele(bp);
	}
}

/*
 * Inode buffer IO completion routine. It is responsible for removing inodes
 * attached to the buffer from the AIL if they have not been re-logged and
 * completing the inode flush.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;
	LIST_HEAD(flushed_inodes);
	LIST_HEAD(ail_updates);

	/*
	 * Pull the attached inodes from the buffer one at a time and take the
	 * appropriate action on them.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);

		if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
			xfs_iflush_abort(iip->ili_inode);
			continue;
		}
		if (!iip->ili_last_fields)
			continue;

		/* Do an unlocked check for needing the AIL lock. */
		if (iip->ili_flush_lsn == lip->li_lsn ||
		    test_bit(XFS_LI_FAILED, &lip->li_flags))
			list_move_tail(&lip->li_bio_list, &ail_updates);
		else
			list_move_tail(&lip->li_bio_list, &flushed_inodes);
	}

	if (!list_empty(&ail_updates)) {
		xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
		list_splice_tail(&ail_updates, &flushed_inodes);
	}

	xfs_iflush_finish(bp, &flushed_inodes);
	if (!list_empty(&flushed_inodes))
		list_splice_tail(&flushed_inodes, &bp->b_li_list);
}

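/*
 * Inode cluster buffer IO error routine: mark every inode log item attached
 * to the buffer as failed so the flushes are retried later.
 */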
void
xfs_buf_inode_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		set_bit(XFS_LI_FAILED, &lip->li_flags);
}

/*
 * This is the inode flushing abort routine. It is called when
 * the filesystem is shutting down to clean up the inode state. It is
 * responsible for removing the inode item from the AIL if it has not been
 * re-logged and clearing the inode's flush state.
 */
void
xfs_iflush_abort(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_buf		*bp = NULL;

	if (iip) {
		/*
		 * Clear the failed bit before removing the item from the AIL
		 * so xfs_trans_ail_delete() doesn't try to clear and release
		 * the buffer attached to the log item before we are done
		 * with it.
		 */
		clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
		xfs_trans_ail_delete(&iip->ili_item, 0);

		/*
		 * Clear the inode logging fields so no more flushes are
		 * attempted.
		 */
		spin_lock(&iip->ili_lock);
		iip->ili_last_fields = 0;
		iip->ili_fields = 0;
		iip->ili_fsync_fields = 0;
		iip->ili_flush_lsn = 0;
		bp = iip->ili_item.li_buf;
		iip->ili_item.li_buf = NULL;
		list_del_init(&iip->ili_item.li_bio_list);
		spin_unlock(&iip->ili_lock);
	}
	xfs_iflags_clear(ip, XFS_IFLUSHING);
	if (bp)
		xfs_buf_rele(bp);
}

/*
 * Convert an xfs_inode_log_format struct from the old 32 bit version
 * (which can have different field alignments) to the native 64 bit version.
 */
int
xfs_inode_item_format_convert(
	struct xfs_log_iovec		*buf,
	struct xfs_inode_log_format	*in_f)
{
	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;

	if (buf->i_len != sizeof(*in_f32)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	in_f->ilf_type = in_f32->ilf_type;
	in_f->ilf_size = in_f32->ilf_size;
	in_f->ilf_fields = in_f32->ilf_fields;
	in_f->ilf_asize = in_f32->ilf_asize;
	in_f->ilf_dsize = in_f32->ilf_dsize;
	in_f->ilf_ino = in_f32->ilf_ino;
	memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
	in_f->ilf_blkno = in_f32->ilf_blkno;
	in_f->ilf_len = in_f32->ilf_len;
	in_f->ilf_boffset = in_f32->ilf_boffset;
	return 0;
}