| author | David Howells <[email protected]> | 2024-12-16 20:40:55 +0000 |
|---|---|---|
| committer | Christian Brauner <[email protected]> | 2024-12-20 21:34:02 +0000 |
| commit | 06fa229ceb36898e68022b5654c017d2c6582d7d (patch) | |
| tree | db540d16164012b660f6096819f5288356749f39 /fs/netfs/buffered_read.c | |
| parent | netfs: Add a tracepoint to log the lifespan of folio_queue structs (diff) | |
netfs: Abstract out a rolling folio buffer implementation
A rolling buffer is a series of folios held in a list of folio_queues. New
folios and folio_queue structs may be inserted at the head simultaneously
with spent ones being removed from the tail without the need for locking.
The rolling buffer includes an iov_iter, and this must be managed carefully
as the list of folio_queues is extended, so that an oops is not incurred
because the iterator was left pointing to the end of a folio_queue segment
that got appended to and then removed (see the sketch below).
We need to use the mechanism twice, once for read and once for write, and,
in future patches, we will use a second rolling buffer to handle bounce
buffering for content encryption.
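To make the iterator hazard concrete, here is a minimal userspace sketch of
the rolling-buffer idea. The types and names (segment, rolling, cursor,
SEG_SLOTS) are simplified stand-ins invented for illustration, not the
kernel's folio_queue or rolling_buffer API, and the lockless head/tail
discipline the kernel relies on is omitted:

#include <stdio.h>
#include <stdlib.h>

#define SEG_SLOTS 4		/* stand-in for the folios in one folio_queue */

struct segment {
	struct segment *next;	/* older -> newer */
	int slots[SEG_SLOTS];
	int nr;			/* occupied slots */
};

struct rolling {
	struct segment *tail;	/* oldest segment, consumed first */
	struct segment *head;	/* newest segment, appended to */
};

/* Insert at the head, allocating a new segment when the current one fills. */
static int rb_append(struct rolling *rb, int value)
{
	struct segment *seg = rb->head;

	if (!seg || seg->nr == SEG_SLOTS) {
		seg = calloc(1, sizeof(*seg));
		if (!seg)
			return -1;
		if (rb->head)
			rb->head->next = seg;
		else
			rb->tail = seg;
		rb->head = seg;
	}
	seg->slots[seg->nr++] = value;
	return 0;
}

struct cursor {			/* stand-in for the embedded iov_iter */
	struct segment *seg;
	int slot;
};

/* Consume from the tail; returns 1 and *out on success, 0 when drained. */
static int rb_next(struct rolling *rb, struct cursor *c, int *out)
{
	if (!c->seg) {
		c->seg = rb->tail;
		c->slot = 0;
	}
	while (c->seg && c->slot == SEG_SLOTS) {
		struct segment *spent = c->seg;

		/*
		 * Step the cursor off the spent segment *before* freeing it.
		 * A cursor left pointing at the end of a segment that then
		 * gets appended to and removed is exactly the oops the text
		 * above warns about.
		 */
		c->seg = spent->next;
		c->slot = 0;
		rb->tail = c->seg;
		if (rb->head == spent)
			rb->head = NULL;
		free(spent);
	}
	if (!c->seg || c->slot >= c->seg->nr)
		return 0;
	*out = c->seg->slots[c->slot++];
	return 1;
}

int main(void)
{
	struct rolling rb = { NULL, NULL };
	struct cursor c = { NULL, 0 };
	int v;

	for (int i = 0; i < 10; i++)
		rb_append(&rb, i);
	while (rb_next(&rb, &c, &v))
		printf("%d ", v);
	putchar('\n');
	free(rb.tail);		/* the one partially-filled segment left */
	return 0;
}

The point of the sketch is the ordering in rb_next(): the cursor is moved
off a spent segment before that segment is freed, so it can never be left
referencing memory that head-side appends and tail-side removals have
already recycled.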
Signed-off-by: David Howells <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
cc: Jeff Layton <[email protected]>
cc: [email protected]
cc: [email protected]
Signed-off-by: Christian Brauner <[email protected]>
Diffstat (limited to 'fs/netfs/buffered_read.c')
| -rw-r--r-- | fs/netfs/buffered_read.c | 117 |
1 file changed, 22 insertions(+), 95 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ec04d5628d8..db874fea8794 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -64,37 +64,6 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
 }
 
 /*
- * Decant the list of folios to read into a rolling buffer.
- */
-static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
-					struct folio_queue *folioq,
-					struct folio_batch *put_batch)
-{
-	unsigned int order, nr;
-	size_t size = 0;
-
-	nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios,
-			       ARRAY_SIZE(folioq->vec.folios));
-	folioq->vec.nr = nr;
-	for (int i = 0; i < nr; i++) {
-		struct folio *folio = folioq_folio(folioq, i);
-
-		trace_netfs_folio(folio, netfs_folio_trace_read);
-		order = folio_order(folio);
-		folioq->orders[i] = order;
-		size += PAGE_SIZE << order;
-
-		if (!folio_batch_add(put_batch, folio))
-			folio_batch_release(put_batch);
-	}
-
-	for (int i = nr; i < folioq_nr_slots(folioq); i++)
-		folioq_clear(folioq, i);
-
-	return size;
-}
-
-/*
  * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
  * @subreq: The subrequest to be set up
  *
@@ -128,18 +97,12 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 
 	folio_batch_init(&put_batch);
 	while (rreq->submitted < subreq->start + rsize) {
-		struct folio_queue *tail = rreq->buffer_tail, *new;
-		size_t added;
+		ssize_t added;
 
-		new = netfs_folioq_alloc(rreq->debug_id, GFP_NOFS,
-					 netfs_trace_folioq_alloc_read_prep);
-		if (!new)
-			return -ENOMEM;
-		new->prev = tail;
-		tail->next = new;
-		rreq->buffer_tail = new;
-		added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
-		rreq->iter.count += added;
+		added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl,
+						    &put_batch);
+		if (added < 0)
+			return added;
 		rreq->submitted += added;
 	}
 	folio_batch_release(&put_batch);
@@ -147,7 +110,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 
 	subreq->len = rsize;
 	if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
-		size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+		size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
 						rreq->io_streams[0].sreq_max_segs);
 
 		if (limit < rsize) {
@@ -156,20 +119,16 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
 		}
 	}
 
-	subreq->io_iter = rreq->iter;
+	subreq->io_iter = rreq->buffer.iter;
 
 	if (iov_iter_is_folioq(&subreq->io_iter)) {
-		if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) {
-			subreq->io_iter.folioq = subreq->io_iter.folioq->next;
-			subreq->io_iter.folioq_slot = 0;
-		}
 		subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
 		subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
 		subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
 	}
 
 	iov_iter_truncate(&subreq->io_iter, subreq->len);
-	iov_iter_advance(&rreq->iter, subreq->len);
+	rolling_buffer_advance(&rreq->buffer, subreq->len);
 	return subreq->len;
 }
 
@@ -352,34 +311,6 @@ static int netfs_wait_for_read(struct netfs_io_request *rreq)
 	return ret;
 }
 
-/*
- * Set up the initial folioq of buffer folios in the rolling buffer and set the
- * iterator to refer to it.
- */
-static int netfs_prime_buffer(struct netfs_io_request *rreq)
-{
-	struct folio_queue *folioq;
-	struct folio_batch put_batch;
-	size_t added;
-
-	folioq = netfs_folioq_alloc(rreq->debug_id, GFP_KERNEL,
-				    netfs_trace_folioq_alloc_read_prime);
-	if (!folioq)
-		return -ENOMEM;
-
-	rreq->buffer = folioq;
-	rreq->buffer_tail = folioq;
-	rreq->submitted = rreq->start;
-	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
-
-	folio_batch_init(&put_batch);
-	added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
-	folio_batch_release(&put_batch);
-	rreq->iter.count += added;
-	rreq->submitted += added;
-	return 0;
-}
-
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -419,7 +350,8 @@ void netfs_readahead(struct readahead_control *ractl)
 	netfs_rreq_expand(rreq, ractl);
 	rreq->ractl = ractl;
 
-	if (netfs_prime_buffer(rreq) < 0)
+	rreq->submitted = rreq->start;
+	if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
 		goto cleanup_free;
 	netfs_read_to_pagecache(rreq);
 
@@ -435,22 +367,18 @@ EXPORT_SYMBOL(netfs_readahead);
 /*
  * Create a rolling buffer with a single occupying folio.
  */
-static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio)
+static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio,
+					unsigned int rollbuf_flags)
 {
-	struct folio_queue *folioq;
+	ssize_t added;
 
-	folioq = netfs_folioq_alloc(rreq->debug_id, GFP_KERNEL,
-				    netfs_trace_folioq_alloc_read_sing);
-	if (!folioq)
+	if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
 		return -ENOMEM;
 
-	folioq_append(folioq, folio);
-	BUG_ON(folioq_folio(folioq, 0) != folio);
-	BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
-	rreq->buffer = folioq;
-	rreq->buffer_tail = folioq;
-	rreq->submitted = rreq->start + rreq->len;
-	iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len);
+	added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
+	if (added < 0)
+		return added;
+	rreq->submitted = rreq->start + added;
 	rreq->ractl = (struct readahead_control *)1UL;
 	return 0;
 }
@@ -518,7 +446,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
 	}
 	if (to < flen)
 		bvec_set_folio(&bvec[i++], folio, flen - to, to);
-	iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+	iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
 	rreq->submitted = rreq->start + flen;
 
 	netfs_read_to_pagecache(rreq);
@@ -586,7 +514,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
 	/* Set up the output buffer */
-	ret = netfs_create_singular_buffer(rreq, folio);
+	ret = netfs_create_singular_buffer(rreq, folio, 0);
 	if (ret < 0)
 		goto discard;
 
@@ -743,7 +671,7 @@ retry:
 	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
 
 	/* Set up the output buffer */
-	ret = netfs_create_singular_buffer(rreq, folio);
+	ret = netfs_create_singular_buffer(rreq, folio, 0);
 	if (ret < 0)
 		goto error_put;
 
@@ -808,11 +736,10 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 	trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
 
 	/* Set up the output buffer */
-	ret = netfs_create_singular_buffer(rreq, folio);
+	ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK);
 	if (ret < 0)
 		goto error_put;
 
-	folioq_mark2(rreq->buffer, 0);
 	netfs_read_to_pagecache(rreq);
 	ret = netfs_wait_for_read(rreq);
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
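Distilled from the hunks above, the buffered-read paths now drive the
abstracted buffer through a handful of helpers. The fragment below is only
the usage pattern visible in this diff, with signatures inferred from the
call sites; the rolling_buffer definitions themselves land in a separate
file that is outside this diffstat:

	/* Set-up: replaces netfs_prime_buffer() and the open-coded
	 * netfs_folioq_alloc()/list chaining. */
	if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
		return -ENOMEM;

	/* Filling: batch-load folios from readahead, or append a single
	 * folio; both return the number of bytes added or a negative error. */
	added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl, &put_batch);
	added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);

	/* Consuming: advance the buffer's embedded iterator instead of
	 * calling iov_iter_advance() on a request-level iterator. */
	rolling_buffer_advance(&rreq->buffer, subreq->len);

Note also that the explicit folioq_mark2() call in netfs_prefetch_for_write()
is subsumed by passing NETFS_ROLLBUF_PAGECACHE_MARK to
netfs_create_singular_buffer().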
