From 0fb1d255e4be46c04ff86e84eddf230e18f8a598 Mon Sep 17 00:00:00 2001
From: Mike Christie
Date: Thu, 29 Nov 2007 13:11:21 -0600
Subject: [PATCH 12/13] support partial blk_rq_copy_user_iov transfers

st.c supports the ability to read/write from an interface buffer. In
this mode the driver will send the device a command to transfer X
bytes, but it may only transfer Y bytes to userspace. The app could
then read/write again, but the data would only be transferred from the
kernel buffer, since the first command had already transferred the
extra bytes to the kernel buffer.

This patch modifies the blk/bio helpers to support this model by
allowing callers to invoke the functions that transfer data to/from
kernel buffers multiple times, from different offsets.

Signed-off-by: Mike Christie
---
 block/ll_rw_blk.c |   33 +++++++++++++++++++++++++++++----
 drivers/scsi/sg.c |    5 +++++
 fs/bio.c          |   39 +++++++++++++++++++++++++++++----------
 3 files changed, 63 insertions(+), 14 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 61b0118..646d88f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2469,8 +2469,15 @@ int blk_rq_setup_buffer(struct bio_set *bs, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
-		return -EINVAL;
+	/*
+	 * st.c may call this multiple times to copy/map in multiple user
+	 * buffers
+	 */
+	if (rq->data_len + len > (q->max_hw_sectors << 9)) {
+		ret = -EINVAL;
+		bio = rq->bio;
+		goto unmap_rq;
+	}
 
 	while (bytes_read != len) {
 		ret = __blk_rq_setup_buffer(bs, rq, ubuf, len - bytes_read,
@@ -2516,8 +2523,16 @@ int blk_rq_copy_user_iov(struct bio_set *bs, struct request *rq,
 			 struct sg_iovec *iov, int iov_count,
 			 unsigned long len, gfp_t gfp_mask)
 {
+	struct bio *copy_start = NULL;
 	int ret;
 
+	/*
+	 * if this is called multiple times, then we want to start the copy
+	 * from where we left off with the last iovec
+	 */
+	if (rq->bio)
+		copy_start = rq->biotail;
+
 	ret = blk_rq_setup_buffer(bs, rq, NULL, len, gfp_mask);
 	if (ret)
 		return ret;
@@ -2525,7 +2540,8 @@ int blk_rq_copy_user_iov(struct bio_set *bs, struct request *rq,
 	if (rq_data_dir(rq) == READ)
 		return 0;
 
-	ret = bio_copy_user_iov(rq->bio, iov, iov_count);
+	ret = bio_copy_user_iov(copy_start ? copy_start : rq->bio, iov,
+				iov_count);
 	if (ret)
 		goto fail;
 	return 0;
@@ -2543,8 +2559,12 @@ int blk_rq_uncopy_user_iov(struct bio *bio, struct sg_iovec *iov,
 	if (!bio)
 		return 0;
 
-	if (bio_data_dir(bio) == READ)
+	if (bio_data_dir(bio) == READ) {
 		ret = bio_copy_user_iov(bio, iov, iov_count);
+		if (ret > 0)
+			/* not yet done */
+			return ret;
+	}
 	blk_rq_destroy_buffer(bio);
 	return ret;
 }
@@ -2767,6 +2787,11 @@ int blk_rq_complete_transfer(struct bio *bio, void __user *ubuf,
 		iov.iov_base = ubuf;
 		iov.iov_len = len;
 		ret = blk_rq_uncopy_user_iov(bio, &iov, 1);
+		if (ret > 0) {
+			/* we do not care about partial transfers */
+			blk_rq_destroy_buffer(bio);
+			ret = 0;
+		}
 	}
 	return ret;
 }
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8adf7ea..b58c9c8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1691,6 +1691,11 @@ sg_read_xfer(Sg_request * srp)
 		}
 
 		res = blk_rq_uncopy_user_iov(srp->bio, u_iov, iovec_count);
+		if (res > 0) {
+			/* we do not care about partial transfers */
+			blk_rq_complete_transfer(srp->bio, NULL, 0);
+			res = 0;
+		}
 		kfree(u_iov);
 	} else if (!(new_interface && (SG_FLAG_MMAP_IO & hp->flags)))
 		/*
diff --git a/fs/bio.c b/fs/bio.c
index a8bbc78..c9b369b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -410,38 +410,55 @@ struct bio *bioset_setup_pages(struct request_queue *q, struct bio_set *bs,
 	return bio;
 }
 
+/**
+ * bio_copy_user_iov - copy data to/from user buf
+ * @head: start of bios to copy to/from
+ * @iov: iovec of buffers
+ * @iov_count: number of iovecs
+ *
+ * This may be called multiple times to copy from/to multiple iovecs (st
+ * may need to copy from multiple buffers and not have them all at
+ * one time). This will return 0 on success, -XYZ on error, and the number
+ * of bytes that were copied if there is more bio data to copy.
+ */
 int bio_copy_user_iov(struct bio *head, struct sg_iovec *iov, int iov_count)
 {
 	unsigned int iov_len = 0;
+	unsigned total_copied = 0;
 	int ret, i, iov_index = 0;
 	struct bio *bio;
 	struct bio_map_data *bmd;
 	char __user *p = NULL;
 
-	if (!iov || !iov_count)
+	/*
+	 * If we do not get an iov or the first iov is bad, then just
+	 * drop the transfer.
+	 */
+	if (!iov || !iov_count ||
+	    !iov->iov_base || !iov->iov_len)
 		return 0;
 
 	for (bio = head; bio; bio = bio->bi_next) {
 		bmd = bio->bi_private;
 		for (i = 0; i < bmd->nr_vecs; i++) {
-			unsigned copy_bytes, bvec_offset = 0, bvec_len;
+			unsigned copy_bytes, bvec_offset = 0;
 			struct page *page;
 			char *addr;
 
-			page = bmd->iovecs[i].page;
-			bvec_len = bmd->iovecs[i].len;
+			if (!bmd->iovecs[i].len)
+				continue;
 
+			page = bmd->iovecs[i].page;
 continue_from_bvec:
 			addr = page_address(page) + bvec_offset;
 			if (!p) {
 				if (iov_index == iov_count)
 					/*
-					 * caller wanted a buffer larger
-					 * than transfer
+					 * more data in bio buffers, but caller
+					 * did not want to transfer yet
 					 */
-					break;
-
+					return total_copied;
 				p = iov[iov_index].iov_base;
 				iov_len = iov[iov_index].iov_len;
 				if (!p || !iov_len) {
@@ -455,7 +472,7 @@ continue_from_bvec:
 				}
 			}
 
-			copy_bytes = min(iov_len, bvec_len - bvec_offset);
+			copy_bytes = min(iov_len, bmd->iovecs[i].len);
 			if (bio_data_dir(head) == READ)
 				ret = copy_to_user(p, addr, copy_bytes);
 			else
@@ -463,12 +480,14 @@ continue_from_bvec:
 			if (ret)
 				return -EFAULT;
 
+			total_copied += copy_bytes;
+			bmd->iovecs[i].len -= copy_bytes;
 			bvec_offset += copy_bytes;
 			iov_len -= copy_bytes;
 			if (iov_len == 0) {
 				p = NULL;
 				iov_index++;
-				if (bvec_offset < bvec_len)
+				if (bmd->iovecs[i].len)
 					goto continue_from_bvec;
 			} else
 				p += copy_bytes;
-- 
1.5.4.1
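
Usage note (illustrative, not part of the patch): with the rq->data_len
check above, a driver in the st.c mold can build one request out of
several user buffers by calling blk_rq_copy_user_iov() repeatedly; each
call appends bios to the request and, via copy_start = rq->biotail,
resumes copying where the previous call stopped. A minimal sketch,
assuming a request and bio_set that were already set up by the caller
(the function name and includes are hypothetical):

#include <linux/blkdev.h>
#include <scsi/sg.h>

/*
 * Append two user iovecs to the same WRITE request.  The second call
 * starts its copy at rq->biotail, so its data lands behind the bytes
 * copied by the first call.  Both calls together must stay within
 * q->max_hw_sectors << 9, or the helper fails with -EINVAL.
 */
static int example_two_stage_copy(struct bio_set *bs, struct request *rq,
				  struct sg_iovec *iov1, int cnt1,
				  unsigned long len1,
				  struct sg_iovec *iov2, int cnt2,
				  unsigned long len2)
{
	int ret;

	ret = blk_rq_copy_user_iov(bs, rq, iov1, cnt1, len1, GFP_KERNEL);
	if (ret)
		return ret;
	return blk_rq_copy_user_iov(bs, rq, iov2, cnt2, len2, GFP_KERNEL);
}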
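On the completion side, the new positive return value from
blk_rq_uncopy_user_iov() lets the same driver drain a large kernel
buffer to userspace across several calls instead of discarding the
excess. Another hedged sketch, again with a hypothetical function name:

/*
 * Copy the next chunk of completed READ data into the user's iovecs.
 * A positive return from blk_rq_uncopy_user_iov() means the iovecs
 * filled up before the bio data ran out; the bios are kept around so
 * a later call can fetch the rest.  Zero means everything was copied
 * and the buffer has been torn down.
 */
static int example_drain_chunk(struct bio *bio, struct sg_iovec *iov,
			       int iov_count)
{
	int ret;

	ret = blk_rq_uncopy_user_iov(bio, iov, iov_count);
	if (ret > 0)
		return 0;	/* partial transfer; more data remains */
	return ret;		/* done (0) or error (negative) */
}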