/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->flush_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }
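
        /*
         * Illustrative note (not in the original source): the backing
         * store is carved out of page-set allocations of at most
         * MAX_ORDER - 1 pages each. For example, with power_size = 13
         * (8192 entries) and MAX_ORDER - 1 = 10, the loop below performs
         * iter = 1 << (13 - 10) = 8 allocations of order 10, each backing
         * 1024 consecutive entries (assuming seg_size == PAGE_SIZE, so
         * that 2^order pages hold 2^order segments).
         */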
        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_set(&rb->inflight_flush_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}
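
/*
 * Illustrative example (not in the original source): get_count_order()
 * rounds up to the next power of two, and the max() enforces the
 * 128-entry floor. So nr_entries = 100 yields 1 << max(7, 7) = 128,
 * while nr_entries = 1000 yields 1 << max(10, 7) = 1024.
 */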

void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

        flags = READ_ONCE(w_ctx->flags);
        WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
                        "pblk: overwriting unsubmitted data\n");

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
        w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))
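
/*
 * Illustrative note (not in the original source): CIRC_CNT(head, tail,
 * size) evaluates to (head - tail) & (size - 1) and CIRC_SPACE() to
 * (tail - head - 1) & (size - 1), so one slot is always left unused to
 * distinguish a full ring from an empty one. E.g. with size = 8,
 * head = 2 and tail = 6: count = (2 - 6) & 7 = 4, space = 3.
 */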

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

/*
 * Buffer count is calculated with respect to the submission entry signaling
 * the entries that are available to send to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                        (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[rb->l2p_update];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                                        entry->cacheline);

                line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write
 * buffer from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, count);

out:
        return ret;
}
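
/*
 * Illustrative example (not in the original source): the l2p table is
 * updated lazily, only when a new write would reuse entries whose l2p
 * mapping still points into the cache. With nr_entries(rb) = 8, mem = 6
 * and l2p_update = 0, the space up to l2p_update is (0 - 6 - 1) & 7 = 1
 * entry; an incoming 3-entry write therefore forces count = 3 - 1 = 2
 * entries to be remapped to their device addresses first.
 */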

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, to_update);

        spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
                                   unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int sync, flush_point;

        pblk_rb_sync_init(rb, NULL);
        sync = READ_ONCE(rb->sync);

        if (pos == sync) {
                pblk_rb_sync_end(rb, NULL);
                return 0;
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_inc(&rb->inflight_flush_point);
#endif

        flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[flush_point];

        /* Protect flush points */
        smp_store_release(&rb->flush_point, flush_point);

        if (bio)
                bio_list_add(&entry->w_ctx.bios, bio);

        pblk_rb_sync_end(rb, NULL);

        return bio ? 1 : 0;
}
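
/*
 * Illustrative note (not in the original source): @pos is the first
 * entry *after* the data that must be flushed, so the flush point is
 * placed on the previous entry (wrapping from 0 to nr_entries - 1). A
 * flush bio, if any, is parked on that entry's bio list and completed
 * once the sync pointer passes it. If pos == sync, the buffer holds no
 * unsynced data and no flush point is needed.
 */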

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}

void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_flush_point_set(rb, NULL, mem))
                return;

        pblk_write_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

                atomic64_inc(&pblk->nr_flush);
                if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);

        return 1;
}
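
/*
 * Illustrative note (not in the original source): *io_ret distinguishes
 * how the bio is completed. NVM_IO_DONE tells the caller to end the bio
 * immediately, while NVM_IO_OK is set when a REQ_PREFLUSH bio has been
 * attached to a flush point above; in that case completion is deferred
 * until the flush point is synced to the media.
 */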

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}

/*
 * See the comment on pblk_rb_may_write_user().
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pblk_err(pblk, "could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                                                rb->seg_size) {
                        pblk_err(pblk, "could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pblk_err(pblk, "could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }

                if (pad < pblk->min_write_pgs)
                        atomic64_inc(&pblk->pad_dist[pad - 1]);
                else
                        pblk_warn(pblk, "padding more than min. sectors\n");

                atomic64_add(pad, &pblk->pad_wa);
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(pad, &pblk->padded_writes);
#endif

        return NVM_IO_OK;
}
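
/*
 * Illustrative note (not in the original source): @nr_entries is the
 * number of sectors the device-side write requires, while @count is how
 * many valid buffer entries are actually available. When count <
 * nr_entries, the remaining entries are padded with freshly allocated
 * pages via pblk_bio_add_pages() and accounted in the pad distribution
 * statistics; e.g. count = 5 and nr_entries = 8 pads 3 sectors.
 */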

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr l2p_ppa;
        u64 pos = pblk_addr_to_cacheline(ppa);
        void *data;
        int flags;
        int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        spin_lock(&rb->w_lock);
        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
                                                flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, it will be read later on.
         */
        if (unlikely(!advanced_bio))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}
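
/*
 * Illustrative note (not in the original source): the copy above is only
 * valid if three conditions hold under rb->w_lock: the l2p table still
 * maps @lba to this cacheline address, the entry itself still records
 * @lba, and the entry has not been recycled (PBLK_WRITABLE_ENTRY clear).
 * Failing any of these, the caller must fall back to reading the sector
 * from the media.
 */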

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync, flush_point;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);
        flush_point = READ_ONCE(rb->flush_point);

        if (flush_point != EMPTY_ENTRY) {
                unsigned int secs_to_flush;

                secs_to_flush = pblk_rb_ring_count(flush_point, sync,
                                        rb->nr_entries);
                if (secs_to_flush < nr_entries) {
                        /* Protect flush points */
                        smp_store_release(&rb->flush_point, EMPTY_ENTRY);
                }
        }

        sync = (sync + nr_entries) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}
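
/*
 * Illustrative example (not in the original source): advancing sync past
 * the flush point retires it. With a ring of 8 entries, sync = 3,
 * flush_point = 5 and nr_entries = 3 being synced: secs_to_flush =
 * (5 - 3) & 7 = 2 < 3, so sync moves beyond entry 5, the flush point is
 * cleared, and any bios parked on it can be completed by the caller.
 */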

/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync, flush_point;
        unsigned int submitted, to_flush;

        /* Protect flush points */
        flush_point = smp_load_acquire(&rb->flush_point);
        if (flush_point == EMPTY_ENTRY)
                return 0;

        /* Protect syncs */
        sync = smp_load_acquire(&rb->sync);

        subm = READ_ONCE(rb->subm);
        submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        /* The sync point itself counts as a sector to sync */
        to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

        return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
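
/*
 * Illustrative example (not in the original source): with a ring of 8
 * entries, sync = 3, subm = 4 and flush_point = 5: submitted =
 * (4 - 3) & 7 = 1 and to_flush = ((5 - 3) & 7) + 1 = 3, so 2 more
 * sectors must still be submitted to satisfy the outstanding flush.
 */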

/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                        (rb->sync == rb->l2p_update) &&
                        (rb->flush_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock_irq(&rb->s_lock);
        spin_unlock(&rb->w_lock);

        return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->flush_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        rb->flush_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);

        return offset;
}