- #include <linux/export.h>
- #include <linux/bvec.h>
- #include <linux/uio.h>
- #include <linux/pagemap.h>
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
- #include <linux/splice.h>
- #include <net/checksum.h>
- #define PIPE_PARANOIA /* for now */
- #define iterate_iovec(i, n, __v, __p, skip, STEP) { \
- size_t left; \
- size_t wanted = n; \
- __p = i->iov; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } else { \
- left = 0; \
- } \
- while (unlikely(!left && n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- left = (STEP); \
- __v.iov_len -= left; \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted - n; \
- }
- #define iterate_kvec(i, n, __v, __p, skip, STEP) { \
- size_t wanted = n; \
- __p = i->kvec; \
- __v.iov_len = min(n, __p->iov_len - skip); \
- if (likely(__v.iov_len)) { \
- __v.iov_base = __p->iov_base + skip; \
- (void)(STEP); \
- skip += __v.iov_len; \
- n -= __v.iov_len; \
- } \
- while (unlikely(n)) { \
- __p++; \
- __v.iov_len = min(n, __p->iov_len); \
- if (unlikely(!__v.iov_len)) \
- continue; \
- __v.iov_base = __p->iov_base; \
- (void)(STEP); \
- skip = __v.iov_len; \
- n -= __v.iov_len; \
- } \
- n = wanted; \
- }
- #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
- struct bvec_iter __start; \
- __start.bi_size = n; \
- __start.bi_bvec_done = skip; \
- __start.bi_idx = 0; \
- for_each_bvec(__v, i->bvec, __bi, __start) { \
- if (!__v.bv_len) \
- continue; \
- (void)(STEP); \
- } \
- }
- #define iterate_all_kinds(i, n, v, I, B, K) { \
- if (likely(n)) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- } \
- } \
- }
- #define iterate_and_advance(i, n, v, I, B, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (i->count) { \
- size_t skip = i->iov_offset; \
- if (unlikely(i->type & ITER_BVEC)) { \
- const struct bio_vec *bvec = i->bvec; \
- struct bio_vec v; \
- struct bvec_iter __bi; \
- iterate_bvec(i, n, v, __bi, skip, (B)) \
- i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
- i->nr_segs -= i->bvec - bvec; \
- skip = __bi.bi_bvec_done; \
- } else if (unlikely(i->type & ITER_KVEC)) { \
- const struct kvec *kvec; \
- struct kvec v; \
- iterate_kvec(i, n, v, kvec, skip, (K)) \
- if (skip == kvec->iov_len) { \
- kvec++; \
- skip = 0; \
- } \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else { \
- const struct iovec *iov; \
- struct iovec v; \
- iterate_iovec(i, n, v, iov, skip, (I)) \
- if (skip == iov->iov_len) { \
- iov++; \
- skip = 0; \
- } \
- i->nr_segs -= iov - i->iov; \
- i->iov = iov; \
- } \
- i->count -= n; \
- i->iov_offset = skip; \
- } \
- }
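- /*
- * Illustrative sketch (not part of the original file): a caller of
- * iterate_all_kinds() supplies one STEP expression per segment type, in
- * the order ITER_IOVEC, ITER_BVEC, ITER_KVEC. The hypothetical helper
- * below merely counts the segments needed to cover i->count bytes; the
- * iovec STEP must evaluate to the number of bytes left uncopied, hence
- * the trailing 0.
- */
- static int example_count_segments(const struct iov_iter *i)
- {
- int seen = 0;
- size_t size = i->count;
- iterate_all_kinds(i, size, v,
- ({ seen++; 0; }), /* ITER_IOVEC: user memory, not dereferenced */
- seen++, /* ITER_BVEC */
- seen++ /* ITER_KVEC */
- )
- return seen;
- }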
- static int copyout(void __user *to, const void *from, size_t n)
- {
- if (access_ok(VERIFY_WRITE, to, n)) {
- kasan_check_read(from, n);
- n = raw_copy_to_user(to, from, n);
- }
- return n;
- }
- static int copyin(void *to, const void __user *from, size_t n)
- {
- if (access_ok(VERIFY_READ, from, n)) {
- kasan_check_write(to, n);
- n = raw_copy_from_user(to, from, n);
- }
- return n;
- }
- static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
- {
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *from;
- if (unlikely(bytes > i->count))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
- kaddr = kmap_atomic(page);
- from = kaddr + offset;
- /* first chunk, usually the only one */
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = from - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
- kaddr = kmap(page);
- from = kaddr + offset;
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- kunmap(page);
- done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
- }
- static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
- {
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *to;
- if (unlikely(bytes > i->count))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
- kaddr = kmap_atomic(page);
- to = kaddr + offset;
- /* first chunk, usually the only one */
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = to - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
- kaddr = kmap(page);
- to = kaddr + offset;
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- kunmap(page);
- done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
- }
- #ifdef PIPE_PARANOIA
- static bool sanity(const struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
- int next = pipe->curbuf + pipe->nrbufs;
- if (i->iov_offset) {
- struct pipe_buffer *p;
- if (unlikely(!pipe->nrbufs))
- goto Bad; // pipe must be non-empty
- if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
- goto Bad; // must be at the last buffer...
- p = &pipe->bufs[idx];
- if (unlikely(p->offset + p->len != i->iov_offset))
- goto Bad; // ... at the end of segment
- } else {
- if (idx != (next & (pipe->buffers - 1)))
- goto Bad; // must be right after the last buffer
- }
- return true;
- Bad:
- printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
- printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
- pipe->curbuf, pipe->nrbufs, pipe->buffers);
- for (idx = 0; idx < pipe->buffers; idx++)
- printk(KERN_ERR "[%p %p %d %d]\n",
- pipe->bufs[idx].ops,
- pipe->bufs[idx].page,
- pipe->bufs[idx].offset,
- pipe->bufs[idx].len);
- WARN_ON(1);
- return false;
- }
- #else
- #define sanity(i) true
- #endif
- static inline int next_idx(int idx, struct pipe_inode_info *pipe)
- {
- return (idx + 1) & (pipe->buffers - 1);
- }
- static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- size_t off;
- int idx;
- if (unlikely(bytes > i->count))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- if (!sanity(i))
- return 0;
- off = i->iov_offset;
- idx = i->idx;
- buf = &pipe->bufs[idx];
- if (off) {
- if (offset == off && buf->page == page) {
- /* merge with the last one */
- buf->len += bytes;
- i->iov_offset += bytes;
- goto out;
- }
- idx = next_idx(idx, pipe);
- buf = &pipe->bufs[idx];
- }
- if (idx == pipe->curbuf && pipe->nrbufs)
- return 0;
- pipe->nrbufs++;
- buf->ops = &page_cache_pipe_buf_ops;
- get_page(buf->page = page);
- buf->offset = offset;
- buf->len = bytes;
- i->iov_offset = offset + bytes;
- i->idx = idx;
- out:
- i->count -= bytes;
- return bytes;
- }
- /*
- * Fault in one or more iovecs of the given iov_iter, to a maximum length of
- * bytes. For each iovec, fault in each page that constitutes the iovec.
- *
- * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
- * because it is an invalid address).
- */
- int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
- {
- size_t skip = i->iov_offset;
- const struct iovec *iov;
- int err;
- struct iovec v;
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- iterate_iovec(i, bytes, v, iov, skip, ({
- err = fault_in_pages_readable(v.iov_base, v.iov_len);
- if (unlikely(err))
- return err;
- 0;}))
- }
- return 0;
- }
- EXPORT_SYMBOL(iov_iter_fault_in_readable);
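- /*
- * Illustrative sketch (not part of the original file): the classic
- * buffered-write pattern, modelled loosely on generic_perform_write().
- * Faulting the user pages in up front means the later atomic copy,
- * performed with a page locked, should not take a page fault. The
- * helper name is hypothetical.
- */
- static ssize_t example_prepare_chunk(struct iov_iter *i, size_t bytes)
- {
- if (unlikely(iov_iter_fault_in_readable(i, bytes)))
- return -EFAULT;
- /* lock the page here, then use iov_iter_copy_from_user_atomic() */
- return bytes;
- }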
- void iov_iter_init(struct iov_iter *i, int direction,
- const struct iovec *iov, unsigned long nr_segs,
- size_t count)
- {
- /* It will get better. Eventually... */
- if (uaccess_kernel()) {
- direction |= ITER_KVEC;
- i->type = direction;
- i->kvec = (struct kvec *)iov;
- } else {
- i->type = direction;
- i->iov = iov;
- }
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
- }
- EXPORT_SYMBOL(iov_iter_init);
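- /*
- * Illustrative sketch (not part of the original file): wrapping a single
- * userspace buffer for a read, where READ means the user buffer is the
- * destination the kernel will copy into.
- */
- static void example_init_read_iter(struct iov_iter *it, struct iovec *iov,
- void __user *ubuf, size_t len)
- {
- iov->iov_base = ubuf;
- iov->iov_len = len;
- iov_iter_init(it, READ, iov, 1, len);
- }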
- static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
- {
- char *from = kmap_atomic(page);
- memcpy(to, from + offset, len);
- kunmap_atomic(from);
- }
- static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
- {
- char *to = kmap_atomic(page);
- memcpy(to + offset, from, len);
- kunmap_atomic(to);
- }
- static void memzero_page(struct page *page, size_t offset, size_t len)
- {
- char *addr = kmap_atomic(page);
- memset(addr + offset, 0, len);
- kunmap_atomic(addr);
- }
- static inline bool allocated(struct pipe_buffer *buf)
- {
- return buf->ops == &default_pipe_buf_ops;
- }
- static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
- {
- size_t off = i->iov_offset;
- int idx = i->idx;
- if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
- idx = next_idx(idx, i->pipe);
- off = 0;
- }
- *idxp = idx;
- *offp = off;
- }
- static size_t push_pipe(struct iov_iter *i, size_t size,
- int *idxp, size_t *offp)
- {
- struct pipe_inode_info *pipe = i->pipe;
- size_t off;
- int idx;
- ssize_t left;
- if (unlikely(size > i->count))
- size = i->count;
- if (unlikely(!size))
- return 0;
- left = size;
- data_start(i, &idx, &off);
- *idxp = idx;
- *offp = off;
- if (off) {
- left -= PAGE_SIZE - off;
- if (left <= 0) {
- pipe->bufs[idx].len += size;
- return size;
- }
- pipe->bufs[idx].len = PAGE_SIZE;
- idx = next_idx(idx, pipe);
- }
- while (idx != pipe->curbuf || !pipe->nrbufs) {
- struct page *page = alloc_page(GFP_USER);
- if (!page)
- break;
- pipe->nrbufs++;
- pipe->bufs[idx].ops = &default_pipe_buf_ops;
- pipe->bufs[idx].page = page;
- pipe->bufs[idx].offset = 0;
- if (left <= PAGE_SIZE) {
- pipe->bufs[idx].len = left;
- return size;
- }
- pipe->bufs[idx].len = PAGE_SIZE;
- left -= PAGE_SIZE;
- idx = next_idx(idx, pipe);
- }
- return size - left;
- }
- static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
- struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off;
- int idx;
- if (!sanity(i))
- return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
- i->idx = idx;
- i->iov_offset = off + chunk;
- n -= chunk;
- addr += chunk;
- }
- i->count -= bytes;
- return bytes;
- }
- size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
- {
- const char *from = addr;
- if (unlikely(i->type & ITER_PIPE))
- return copy_pipe_to_iter(addr, bytes, i);
- if (iter_is_iovec(i))
- might_fault();
- iterate_and_advance(i, bytes, v,
- copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- memcpy_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len),
- memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
- )
- return bytes;
- }
- EXPORT_SYMBOL(_copy_to_iter);
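- /*
- * Illustrative sketch (not part of the original file): draining a kernel
- * buffer into whatever the iterator describes. copy_to_iter(), the
- * size-checking wrapper around _copy_to_iter() in <linux/uio.h>, may
- * copy fewer bytes than requested if user memory faults part-way.
- */
- static ssize_t example_emit(struct iov_iter *to, const void *buf, size_t len)
- {
- size_t copied = copy_to_iter(buf, len, to);
- return copied ? copied : -EFAULT;
- }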
- #ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
- static int copyout_mcsafe(void __user *to, const void *from, size_t n)
- {
- if (access_ok(VERIFY_WRITE, to, n)) {
- kasan_check_read(from, n);
- n = copy_to_user_mcsafe((__force void *) to, from, n);
- }
- return n;
- }
- static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
- {
- unsigned long ret;
- char *to;
- to = kmap_atomic(page);
- ret = memcpy_mcsafe(to + offset, from, len);
- kunmap_atomic(to);
- return ret;
- }
- static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
- struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off, xfer = 0;
- int idx;
- if (!sanity(i))
- return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- unsigned long rem;
- rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
- chunk);
- i->idx = idx;
- i->iov_offset = off + chunk - rem;
- xfer += chunk - rem;
- if (rem)
- break;
- n -= chunk;
- addr += chunk;
- }
- i->count -= xfer;
- return xfer;
- }
- /**
- * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
- * @addr: source kernel address
- * @bytes: total transfer length
- * @i: destination iterator
- *
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_to_iter() for protecting read/write to persistent memory.
- * Unless / until an architecture can guarantee identical performance
- * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
- * performance regression to switch more users to the mcsafe version.
- *
- * Otherwise, the main differences between this and the typical _copy_to_iter() are:
- *
- * * Typical tail/residue handling after a fault retries the copy
- * byte-by-byte until the fault happens again. Re-triggering machine
- * checks is potentially fatal so the implementation uses source
- * alignment and poison alignment assumptions to avoid re-triggering
- * hardware exceptions.
- *
- * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
- * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
- * a short copy.
- *
- * See MCSAFE_TEST for self-test.
- */
- size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
- {
- const char *from = addr;
- unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
- if (unlikely(i->type & ITER_PIPE))
- return copy_pipe_to_iter_mcsafe(addr, bytes, i);
- if (iter_is_iovec(i))
- might_fault();
- iterate_and_advance(i, bytes, v,
- copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
- ({
- rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
- (from += v.bv_len) - v.bv_len, v.bv_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- }),
- ({
- rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len);
- if (rem) {
- curr_addr = (unsigned long) from;
- bytes = curr_addr - s_addr - rem;
- return bytes;
- }
- })
- )
- return bytes;
- }
- EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
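- /*
- * Illustrative sketch (not part of the original file): a pmem-style
- * caller treats a short return from the mcsafe variant as a poisoned
- * source rather than retrying byte-by-byte; mapping the shortfall to
- * -EIO is one possible policy, real dax code simply returns the count.
- */
- static ssize_t example_dax_read(void *kaddr, size_t len, struct iov_iter *to)
- {
- size_t done = _copy_to_iter_mcsafe(kaddr, len, to);
- return done < len ? -EIO : done;
- }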
- #endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
- size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
- {
- char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
- if (iter_is_iovec(i))
- might_fault();
- iterate_and_advance(i, bytes, v,
- copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- return bytes;
- }
- EXPORT_SYMBOL(_copy_from_iter);
- bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
- {
- char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- if (iter_is_iovec(i))
- might_fault();
- iterate_all_kinds(i, bytes, v, ({
- if (copyin((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- iov_iter_advance(i, bytes);
- return true;
- }
- EXPORT_SYMBOL(_copy_from_iter_full);
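- /*
- * Illustrative sketch (not part of the original file): the _full variant
- * is all-or-nothing, so a fixed-size header can be pulled without the
- * caller having to unwind a partial copy by hand.
- */
- static int example_pull_header(struct iov_iter *from, void *hdr, size_t len)
- {
- if (!copy_from_iter_full(hdr, len, from))
- return -EFAULT; /* iterator is left unadvanced on failure */
- return 0;
- }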
- size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
- {
- char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
- iterate_and_advance(i, bytes, v,
- __copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- return bytes;
- }
- EXPORT_SYMBOL(_copy_from_iter_nocache);
- #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
- /**
- * _copy_from_iter_flushcache - write destination through cpu cache
- * @addr: destination kernel address
- * @bytes: total transfer length
- * @i: source iterator
- *
- * The pmem driver arranges for filesystem-dax to use this facility via
- * dax_copy_from_iter() for ensuring that writes to persistent memory
- * are flushed through the CPU cache. It is differentiated from
- * _copy_from_iter_nocache() in that it guarantees all data is flushed for
- * all iterator types. The _copy_from_iter_nocache() only attempts to
- * bypass the cache for the ITER_IOVEC case, and on some archs may use
- * instructions that strand dirty-data in the cache.
- */
- size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
- {
- char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
- iterate_and_advance(i, bytes, v,
- __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
- memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len)
- )
- return bytes;
- }
- EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
- #endif
- bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
- {
- char *to = addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len))
- return false;
- 0;}),
- memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- iov_iter_advance(i, bytes);
- return true;
- }
- EXPORT_SYMBOL(_copy_from_iter_full_nocache);
- static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
- {
- struct page *head;
- size_t v = n + offset;
- /*
- * The general case needs to access the page order in order
- * to compute the page size.
- * However, we mostly deal with order-0 pages and thus can
- * avoid a possible cache line miss for requests that fit all
- * page orders.
- */
- if (n <= v && v <= PAGE_SIZE)
- return true;
- head = compound_head(page);
- v += (page - head) << PAGE_SHIFT;
- if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
- return true;
- WARN_ON(1);
- return false;
- }
- size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
- {
- if (unlikely(!page_copy_sane(page, offset, bytes)))
- return 0;
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else if (likely(!(i->type & ITER_PIPE)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- else
- return copy_page_to_iter_pipe(page, offset, bytes, i);
- }
- EXPORT_SYMBOL(copy_page_to_iter);
- size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
- {
- if (unlikely(!page_copy_sane(page, offset, bytes)))
- return 0;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
- if (i->type & (ITER_BVEC|ITER_KVEC)) {
- void *kaddr = kmap_atomic(page);
- size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
- kunmap_atomic(kaddr);
- return wanted;
- } else
- return copy_page_from_iter_iovec(page, offset, bytes, i);
- }
- EXPORT_SYMBOL(copy_page_from_iter);
- static size_t pipe_zero(size_t bytes, struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- size_t n, off;
- int idx;
- if (!sanity(i))
- return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
- if (unlikely(!n))
- return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
- size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[idx].page, off, chunk);
- i->idx = idx;
- i->iov_offset = off + chunk;
- n -= chunk;
- }
- i->count -= bytes;
- return bytes;
- }
- size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
- {
- if (unlikely(i->type & ITER_PIPE))
- return pipe_zero(bytes, i);
- iterate_and_advance(i, bytes, v,
- clear_user(v.iov_base, v.iov_len),
- memzero_page(v.bv_page, v.bv_offset, v.bv_len),
- memset(v.iov_base, 0, v.iov_len)
- )
- return bytes;
- }
- EXPORT_SYMBOL(iov_iter_zero);
- size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes)
- {
- char *kaddr = kmap_atomic(page), *p = kaddr + offset;
- if (unlikely(!page_copy_sane(page, offset, bytes))) {
- kunmap_atomic(kaddr);
- return 0;
- }
- if (unlikely(i->type & ITER_PIPE)) {
- kunmap_atomic(kaddr);
- WARN_ON(1);
- return 0;
- }
- iterate_all_kinds(i, bytes, v,
- copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
- memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
- v.bv_offset, v.bv_len),
- memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
- )
- kunmap_atomic(kaddr);
- return bytes;
- }
- EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
- static inline void pipe_truncate(struct iov_iter *i)
- {
- struct pipe_inode_info *pipe = i->pipe;
- if (pipe->nrbufs) {
- size_t off = i->iov_offset;
- int idx = i->idx;
- int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
- if (off) {
- pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
- idx = next_idx(idx, pipe);
- nrbufs++;
- }
- while (pipe->nrbufs > nrbufs) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
- }
- }
- }
- static void pipe_advance(struct iov_iter *i, size_t size)
- {
- struct pipe_inode_info *pipe = i->pipe;
- if (unlikely(i->count < size))
- size = i->count;
- if (size) {
- struct pipe_buffer *buf;
- size_t off = i->iov_offset, left = size;
- int idx = i->idx;
- if (off) /* make it relative to the beginning of buffer */
- left += off - pipe->bufs[idx].offset;
- while (1) {
- buf = &pipe->bufs[idx];
- if (left <= buf->len)
- break;
- left -= buf->len;
- idx = next_idx(idx, pipe);
- }
- i->idx = idx;
- i->iov_offset = buf->offset + left;
- }
- i->count -= size;
- /* ... and discard everything past that point */
- pipe_truncate(i);
- }
- void iov_iter_advance(struct iov_iter *i, size_t size)
- {
- if (unlikely(i->type & ITER_PIPE)) {
- pipe_advance(i, size);
- return;
- }
- iterate_and_advance(i, size, v, 0, 0, 0)
- }
- EXPORT_SYMBOL(iov_iter_advance);
- void iov_iter_revert(struct iov_iter *i, size_t unroll)
- {
- if (!unroll)
- return;
- if (WARN_ON(unroll > MAX_RW_COUNT))
- return;
- i->count += unroll;
- if (unlikely(i->type & ITER_PIPE)) {
- struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
- size_t off = i->iov_offset;
- while (1) {
- size_t n = off - pipe->bufs[idx].offset;
- if (unroll < n) {
- off -= unroll;
- break;
- }
- unroll -= n;
- if (!unroll && idx == i->start_idx) {
- off = 0;
- break;
- }
- if (!idx--)
- idx = pipe->buffers - 1;
- off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
- }
- i->iov_offset = off;
- i->idx = idx;
- pipe_truncate(i);
- return;
- }
- if (unroll <= i->iov_offset) {
- i->iov_offset -= unroll;
- return;
- }
- unroll -= i->iov_offset;
- if (i->type & ITER_BVEC) {
- const struct bio_vec *bvec = i->bvec;
- while (1) {
- size_t n = (--bvec)->bv_len;
- i->nr_segs++;
- if (unroll <= n) {
- i->bvec = bvec;
- i->iov_offset = n - unroll;
- return;
- }
- unroll -= n;
- }
- } else { /* same logics for iovec and kvec */
- const struct iovec *iov = i->iov;
- while (1) {
- size_t n = (--iov)->iov_len;
- i->nr_segs++;
- if (unroll <= n) {
- i->iov = iov;
- i->iov_offset = n - unroll;
- return;
- }
- unroll -= n;
- }
- }
- }
- EXPORT_SYMBOL(iov_iter_revert);
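- /*
- * Illustrative sketch (not part of the original file): pairing
- * copy_from_iter() with iov_iter_revert() gives all-or-nothing behaviour
- * in places where the _full variant does not fit.
- */
- static bool example_try_exact(struct iov_iter *i, void *buf, size_t len)
- {
- size_t copied = copy_from_iter(buf, len, i);
- if (copied != len) {
- iov_iter_revert(i, copied); /* undo the partial advance */
- return false;
- }
- return true;
- }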
- /*
- * Return the count of just the current iov_iter segment.
- */
- size_t iov_iter_single_seg_count(const struct iov_iter *i)
- {
- if (unlikely(i->type & ITER_PIPE))
- return i->count; // it is a silly place, anyway
- if (i->nr_segs == 1)
- return i->count;
- else if (i->type & ITER_BVEC)
- return min(i->count, i->bvec->bv_len - i->iov_offset);
- else
- return min(i->count, i->iov->iov_len - i->iov_offset);
- }
- EXPORT_SYMBOL(iov_iter_single_seg_count);
- void iov_iter_kvec(struct iov_iter *i, int direction,
- const struct kvec *kvec, unsigned long nr_segs,
- size_t count)
- {
- BUG_ON(!(direction & ITER_KVEC));
- i->type = direction;
- i->kvec = kvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
- }
- EXPORT_SYMBOL(iov_iter_kvec);
- void iov_iter_bvec(struct iov_iter *i, int direction,
- const struct bio_vec *bvec, unsigned long nr_segs,
- size_t count)
- {
- BUG_ON(!(direction & ITER_BVEC));
- i->type = direction;
- i->bvec = bvec;
- i->nr_segs = nr_segs;
- i->iov_offset = 0;
- i->count = count;
- }
- EXPORT_SYMBOL(iov_iter_bvec);
- void iov_iter_pipe(struct iov_iter *i, int direction,
- struct pipe_inode_info *pipe,
- size_t count)
- {
- BUG_ON(direction != ITER_PIPE);
- WARN_ON(pipe->nrbufs == pipe->buffers);
- i->type = direction;
- i->pipe = pipe;
- i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- i->iov_offset = 0;
- i->count = count;
- i->start_idx = i->idx;
- }
- EXPORT_SYMBOL(iov_iter_pipe);
- unsigned long iov_iter_alignment(const struct iov_iter *i)
- {
- unsigned long res = 0;
- size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
- if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
- return size | i->iov_offset;
- return size;
- }
- iterate_all_kinds(i, size, v,
- (res |= (unsigned long)v.iov_base | v.iov_len, 0),
- res |= v.bv_offset | v.bv_len,
- res |= (unsigned long)v.iov_base | v.iov_len
- )
- return res;
- }
- EXPORT_SYMBOL(iov_iter_alignment);
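- /*
- * Illustrative sketch (not part of the original file): a direct-I/O path
- * might use the combined address/length alignment mask to reject
- * iterators that cannot be mapped onto logical block boundaries.
- */
- static int example_dio_aligned(const struct iov_iter *i, unsigned blkbits)
- {
- unsigned long mask = (1UL << blkbits) - 1;
- return (iov_iter_alignment(i) & mask) ? -EINVAL : 0;
- }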
- unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
- {
- unsigned long res = 0;
- size_t size = i->count;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return ~0U;
- }
- iterate_all_kinds(i, size, v,
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0), 0),
- (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
- (size != v.bv_len ? size : 0)),
- (res |= (!res ? 0 : (unsigned long)v.iov_base) |
- (size != v.iov_len ? size : 0))
- );
- return res;
- }
- EXPORT_SYMBOL(iov_iter_gap_alignment);
- static inline ssize_t __pipe_get_pages(struct iov_iter *i,
- size_t maxsize,
- struct page **pages,
- int idx,
- size_t *start)
- {
- struct pipe_inode_info *pipe = i->pipe;
- ssize_t n = push_pipe(i, maxsize, &idx, start);
- if (!n)
- return -EFAULT;
- maxsize = n;
- n += *start;
- while (n > 0) {
- get_page(*pages++ = pipe->bufs[idx].page);
- idx = next_idx(idx, pipe);
- n -= PAGE_SIZE;
- }
- return maxsize;
- }
- static ssize_t pipe_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
- {
- unsigned npages;
- size_t capacity;
- int idx;
- if (!maxsize)
- return 0;
- if (!sanity(i))
- return -EFAULT;
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
- capacity = min(npages, maxpages) * PAGE_SIZE - *start;
- return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
- }
- ssize_t iov_iter_get_pages(struct iov_iter *i,
- struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
- {
- if (maxsize > i->count)
- maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
- return pipe_get_pages(i, pages, maxsize, maxpages, start);
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- addr &= ~(PAGE_SIZE - 1);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
- if (unlikely(res < 0))
- return res;
- return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- get_page(*pages = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- })
- )
- return 0;
- }
- EXPORT_SYMBOL(iov_iter_get_pages);
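- /*
- * Illustrative sketch (not part of the original file): pinning the first
- * few pages described by an iterator and dropping the references again.
- * Note that iov_iter_get_pages() does not advance the iterator itself.
- */
- static ssize_t example_pin_pages(struct iov_iter *i)
- {
- struct page *pages[16];
- size_t start;
- ssize_t bytes = iov_iter_get_pages(i, pages, iov_iter_count(i),
- ARRAY_SIZE(pages), &start);
- if (bytes > 0) {
- int k, npages = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
- for (k = 0; k < npages; k++)
- put_page(pages[k]);
- }
- return bytes;
- }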
- static struct page **get_pages_array(size_t n)
- {
- return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
- }
- static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
- size_t *start)
- {
- struct page **p;
- ssize_t n;
- int idx;
- int npages;
- if (!maxsize)
- return 0;
- if (!sanity(i))
- return -EFAULT;
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
- n = npages * PAGE_SIZE - *start;
- if (maxsize > n)
- maxsize = n;
- else
- npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
- p = get_pages_array(npages);
- if (!p)
- return -ENOMEM;
- n = __pipe_get_pages(i, maxsize, p, idx, start);
- if (n > 0)
- *pages = p;
- else
- kvfree(p);
- return n;
- }
- ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize,
- size_t *start)
- {
- struct page **p;
- if (maxsize > i->count)
- maxsize = i->count;
- if (unlikely(i->type & ITER_PIPE))
- return pipe_get_pages_alloc(i, pages, maxsize, start);
- iterate_all_kinds(i, maxsize, v, ({
- unsigned long addr = (unsigned long)v.iov_base;
- size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
- int n;
- int res;
- addr &= ~(PAGE_SIZE - 1);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- p = get_pages_array(n);
- if (!p)
- return -ENOMEM;
- res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
- if (unlikely(res < 0)) {
- kvfree(p);
- return res;
- }
- *pages = p;
- return (res == n ? len : res * PAGE_SIZE) - *start;
- 0;}),({
- /* can't be more than PAGE_SIZE */
- *start = v.bv_offset;
- *pages = p = get_pages_array(1);
- if (!p)
- return -ENOMEM;
- get_page(*p = v.bv_page);
- return v.bv_len;
- }),({
- return -EFAULT;
- })
- )
- return 0;
- }
- EXPORT_SYMBOL(iov_iter_get_pages_alloc);
- size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
- {
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
- iterate_and_advance(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (!err) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- err ? v.iov_len : 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
- kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
- off += v.bv_len;
- }),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- return bytes;
- }
- EXPORT_SYMBOL(csum_and_copy_from_iter);
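- /*
- * Illustrative sketch (not part of the original file): datagram code
- * copies payload out of an iterator while folding it into a checksum in
- * a single pass, avoiding a second walk over the data.
- */
- static size_t example_csum_pull(void *dst, size_t len, struct iov_iter *from)
- {
- __wsum csum = 0;
- size_t n = csum_and_copy_from_iter(dst, len, &csum, from);
- /* on success, csum now covers the n bytes actually copied */
- return n;
- }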
- bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
- {
- char *to = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return false;
- }
- if (unlikely(i->count < bytes))
- return false;
- iterate_all_kinds(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_from_user(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0, &err);
- if (err)
- return false;
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
- kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
- off += v.bv_len;
- }),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- iov_iter_advance(i, bytes);
- return true;
- }
- EXPORT_SYMBOL(csum_and_copy_from_iter_full);
- size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
- {
- const char *from = addr;
- __wsum sum, next;
- size_t off = 0;
- sum = *csum;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1); /* for now */
- return 0;
- }
- iterate_and_advance(i, bytes, v, ({
- int err = 0;
- next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len, 0, &err);
- if (!err) {
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- }
- err ? v.iov_len : 0;
- }), ({
- char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
- p + v.bv_offset,
- v.bv_len, 0);
- kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
- off += v.bv_len;
- }),({
- next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
- off += v.iov_len;
- })
- )
- *csum = sum;
- return bytes;
- }
- EXPORT_SYMBOL(csum_and_copy_to_iter);
- int iov_iter_npages(const struct iov_iter *i, int maxpages)
- {
- size_t size = i->count;
- int npages = 0;
- if (!size)
- return 0;
- if (unlikely(i->type & ITER_PIPE)) {
- struct pipe_inode_info *pipe = i->pipe;
- size_t off;
- int idx;
- if (!sanity(i))
- return 0;
- data_start(i, &idx, &off);
- /* some of this one + all after this one */
- npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
- if (npages >= maxpages)
- return maxpages;
- } else iterate_all_kinds(i, size, v, ({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- 0;}),({
- npages++;
- if (npages >= maxpages)
- return maxpages;
- }),({
- unsigned long p = (unsigned long)v.iov_base;
- npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
- - p / PAGE_SIZE;
- if (npages >= maxpages)
- return maxpages;
- })
- )
- return npages;
- }
- EXPORT_SYMBOL(iov_iter_npages);
- const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
- {
- *new = *old;
- if (unlikely(new->type & ITER_PIPE)) {
- WARN_ON(1);
- return NULL;
- }
- if (new->type & ITER_BVEC)
- return new->bvec = kmemdup(new->bvec,
- new->nr_segs * sizeof(struct bio_vec),
- flags);
- else
- /* iovec and kvec have identical layout */
- return new->iov = kmemdup(new->iov,
- new->nr_segs * sizeof(struct iovec),
- flags);
- }
- EXPORT_SYMBOL(dup_iter);
- /**
- * import_iovec() - Copy an array of &struct iovec from userspace
- * into the kernel, check that it is valid, and initialize a new
- * &struct iov_iter iterator to access it.
- *
- * @type: One of %READ or %WRITE.
- * @uvector: Pointer to the userspace array.
- * @nr_segs: Number of elements in userspace array.
- * @fast_segs: Number of elements in @iov.
- * @iov: (input and output parameter) Pointer to pointer to (usually small
- * on-stack) kernel array.
- * @i: Pointer to iterator that will be initialized on success.
- *
- * If the array pointed to by *@iov is large enough to hold all @nr_segs,
- * then this function places %NULL in *@iov on return. Otherwise, a new
- * array will be allocated and the result placed in *@iov. This means that
- * the caller may call kfree() on *@iov regardless of whether the small
- * on-stack array was used or not (and regardless of whether this function
- * returns an error or not).
- *
- * Return: 0 on success or negative error code on error.
- */
- int import_iovec(int type, const struct iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
- {
- ssize_t n;
- struct iovec *p;
- n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return 0;
- }
- EXPORT_SYMBOL(import_iovec);
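- /*
- * Illustrative sketch (not part of the original file): the usual
- * readv()-style caller, with a small on-stack array and an unconditional
- * kfree(), which the comment above explicitly permits.
- */
- static int example_import(const struct iovec __user *uvec, unsigned nr)
- {
- struct iovec stack[UIO_FASTIOV], *iov = stack;
- struct iov_iter iter;
- int ret = import_iovec(READ, uvec, nr, UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- return ret;
- /* hand &iter to the actual I/O here */
- kfree(iov); /* NULL when the stack array was large enough */
- return 0;
- }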
- #ifdef CONFIG_COMPAT
- #include <linux/compat.h>
- int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i)
- {
- ssize_t n;
- struct iovec *p;
- n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
- *iov, &p);
- if (n < 0) {
- if (p != *iov)
- kfree(p);
- *iov = NULL;
- return n;
- }
- iov_iter_init(i, type, p, nr_segs, n);
- *iov = p == *iov ? NULL : p;
- return 0;
- }
- #endif
- int import_single_range(int rw, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i)
- {
- if (len > MAX_RW_COUNT)
- len = MAX_RW_COUNT;
- if (unlikely(!access_ok(!rw, buf, len)))
- return -EFAULT;
- iov->iov_base = buf;
- iov->iov_len = len;
- iov_iter_init(i, rw, iov, 1, len);
- return 0;
- }
- EXPORT_SYMBOL(import_single_range);
- int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context)
- {
- struct kvec w;
- int err = -EINVAL;
- if (!bytes)
- return 0;
- iterate_all_kinds(i, bytes, v, -EINVAL, ({
- w.iov_base = kmap(v.bv_page) + v.bv_offset;
- w.iov_len = v.bv_len;
- err = f(&w, context);
- kunmap(v.bv_page);
- err;}), ({
- w = v;
- err = f(&w, context);})
- )
- return err;
- }
- EXPORT_SYMBOL(iov_iter_for_each_range);
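- /*
- * Illustrative sketch (not part of the original file): a callback that
- * tallies the bytes visited. iov_iter_for_each_range() itself rejects
- * ITER_IOVEC with -EINVAL, since user memory cannot be handed out as
- * kernel kvecs; the callback's return value is reported back to the
- * caller.
- */
- static int example_tally(struct kvec *vec, void *context)
- {
- size_t *total = context;
- *total += vec->iov_len;
- return 0;
- }
- /* usage: size_t total = 0; iov_iter_for_each_range(i, n, example_tally, &total); */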