/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

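/*
 * Complete the original bios attached to the write buffer entries of a
 * persisted request: release any flush points held by the entries, end
 * the user bios, free the pages used for padding and advance the
 * buffer's sync pointer. Returns the new sync position.
 */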
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

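/*
 * Completions must advance the sync pointer in write buffer order. A
 * request that completes ahead of the current sync position is parked
 * on compl_list and drained once the requests before it have synced.
 */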
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	u64 paddr;
	int done = 0;

	line = &pblk->lines[pblk_ppa_to_line(*ppa)];
	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		if (geo->version == NVM_OCSSD_SPEC_12) {
			map_ppa.ppa++;
			if (map_ppa.g.pg == geo->num_pg)
				done = 1;
		} else {
			map_ppa.m.sec++;
			if (map_ppa.m.sec == geo->clba)
				done = 1;
		}
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

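/*
 * Make the write buffer entries of a failed request submittable again.
 * Entries whose LBA has been overwritten since submission are
 * invalidated, the flags are reset so the write thread can pick the
 * entries up again, and the line references taken when the entries
 * were first mapped are dropped.
 */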
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int pos, i;

	spin_lock(&pblk->trans_lock);
	pos = sentry;
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pos];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
		if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
			w_ctx->lba = ADDR_EMPTY;

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);
		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
		kref_put(&line->ref, pblk_line_put);

		pos = (pos + 1) & (rb->nr_entries - 1);
	}
	spin_unlock(&pblk->trans_lock);
}

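/*
 * Queue the context of a failed write on the resubmit list so that the
 * write thread reissues its entries ahead of new user data.
 */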
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

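/*
 * Write error recovery worker: log the error, map out the remaining
 * sectors of the failed chunk, queue the affected write buffer entries
 * for resubmission and release the request's resources.
 */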
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list;

	pblk_log_write_err(pblk, rqd);

	if (rqd->nr_ppas == 1)
		ppa_list = &rqd->ppa_addr;
	else
		ppa_list = rqd->ppa_list;

	pblk_map_remaining(pblk, ppa_list);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

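/*
 * Defer write error handling to the recovery worker; resubmission
 * allocates memory with GFP_KERNEL, which cannot be done from the
 * completion context.
 */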
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

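/*
 * Completion path for user data writes: kick off recovery on error,
 * otherwise complete the request in write buffer order.
 */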
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

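/*
 * Completion path for end-of-line metadata (emeta) writes. Once every
 * emeta sector of the line has synced, the line close work is
 * scheduled; a failed emeta write marks the line with a write error so
 * that it can be recovered.
 */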
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

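/*
 * Common setup for write requests: fill in the command fields and
 * carve the PPA list out of the same DMA allocation as the metadata
 * list.
 */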
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

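/*
 * Set up a user data write: allocate the LUN bitmap used to track the
 * per-LUN semaphores and map the buffer entries to physical addresses,
 * folding in an erase for the next line when one is still pending.
 */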
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

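/*
 * Calculate how many sectors to sync to the device for this request,
 * honoring a pending flush point and the device's write constraints.
 */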
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

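/*
 * Submit a write for the next chunk of end-of-line metadata (emeta) of
 * @meta_line. Once all of its emeta has been submitted, the line is
 * taken off the emeta list so that it can be closed on completion.
 */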
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Put the line back on the emeta list */
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding. In
	 * this case, modify the distance to not be optimal, but move the
	 * optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

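/*
 * Return the line whose emeta should be written alongside this data
 * request, or NULL if no emeta is pending or no suitable metadata PPA
 * can be scheduled for this data I/O.
 */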
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

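/*
 * Submit the data write, plus an asynchronous erase for the next data
 * line and an emeta write for a previous line when either is due.
 */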
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

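/*
 * Form a write request from the write buffer and submit it, giving
 * priority to failed writes waiting on the resubmit list. Returns 0
 * if a request was submitted and 1 if there was nothing to do.
 */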
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;
	unsigned int resubmit;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be completed by
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 1;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
			return 1;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 1;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	bio = bio_alloc(GFP_KERNEL, secs_to_sync);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return 1;
}

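/*
 * Write thread: issue write requests while there is work in the
 * buffer, and sleep until woken up when there is not.
 */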
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}