/*
 * Process memory access: implementation of the process_vm_readv and
 * process_vm_writev syscalls, which transfer data directly between the
 * address spaces of two processes.
 */
- #include <linux/mm.h>
- #include <linux/uio.h>
- #include <linux/sched.h>
- #include <linux/highmem.h>
- #include <linux/ptrace.h>
- #include <linux/slab.h>
- #include <linux/syscalls.h>
- #ifdef CONFIG_COMPAT
- #include <linux/compat.h>
- #endif
- static int process_vm_rw_pages(struct page **pages,
- unsigned offset,
- size_t len,
- struct iov_iter *iter,
- int vm_write)
- {
-
- while (len && iov_iter_count(iter)) {
- struct page *page = *pages++;
- size_t copy = PAGE_SIZE - offset;
- size_t copied;
- if (copy > len)
- copy = len;
- if (vm_write) {
- copied = copy_page_from_iter(page, offset, copy, iter);
- set_page_dirty_lock(page);
- } else {
- copied = copy_page_to_iter(page, offset, copy, iter);
- }
- len -= copied;
- if (copied < copy && iov_iter_count(iter))
- return -EFAULT;
- offset = 0;
- }
- return 0;
- }
- #define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
- static int process_vm_rw_single_vec(unsigned long addr,
- unsigned long len,
- struct iov_iter *iter,
- struct page **process_pages,
- struct mm_struct *mm,
- struct task_struct *task,
- int vm_write)
- {
- unsigned long pa = addr & PAGE_MASK;
- unsigned long start_offset = addr - pa;
- unsigned long nr_pages;
- ssize_t rc = 0;
- unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
- / sizeof(struct pages *);
- unsigned int flags = FOLL_REMOTE;
-
- if (len == 0)
- return 0;
- nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
- if (vm_write)
- flags |= FOLL_WRITE;
- while (!rc && nr_pages && iov_iter_count(iter)) {
- int pages = min(nr_pages, max_pages_per_loop);
- size_t bytes;
-
- pages = __get_user_pages_unlocked(task, mm, pa, pages,
- process_pages, flags);
- if (pages <= 0)
- return -EFAULT;
- bytes = pages * PAGE_SIZE - start_offset;
- if (bytes > len)
- bytes = len;
- rc = process_vm_rw_pages(process_pages,
- start_offset, bytes, iter,
- vm_write);
- len -= bytes;
- start_offset = 0;
- nr_pages -= pages;
- pa += pages * PAGE_SIZE;
- while (pages)
- put_page(process_pages[--pages]);
- }
- return rc;
- }
- #define PVM_MAX_PP_ARRAY_COUNT 16
- static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
- const struct iovec *rvec,
- unsigned long riovcnt,
- unsigned long flags, int vm_write)
- {
- struct task_struct *task;
- struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
- struct page **process_pages = pp_stack;
- struct mm_struct *mm;
- unsigned long i;
- ssize_t rc = 0;
- unsigned long nr_pages = 0;
- unsigned long nr_pages_iov;
- ssize_t iov_len;
- size_t total_len = iov_iter_count(iter);
-
- for (i = 0; i < riovcnt; i++) {
- iov_len = rvec[i].iov_len;
- if (iov_len > 0) {
- nr_pages_iov = ((unsigned long)rvec[i].iov_base
- + iov_len)
- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
- / PAGE_SIZE + 1;
- nr_pages = max(nr_pages, nr_pages_iov);
- }
- }
- if (nr_pages == 0)
- return 0;
- if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
-
- process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
- sizeof(struct pages *)*nr_pages),
- GFP_KERNEL);
- if (!process_pages)
- return -ENOMEM;
- }
-
- rcu_read_lock();
- task = find_task_by_vpid(pid);
- if (task)
- get_task_struct(task);
- rcu_read_unlock();
- if (!task) {
- rc = -ESRCH;
- goto free_proc_pages;
- }
- mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
- if (!mm || IS_ERR(mm)) {
- rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
-
- if (rc == -EACCES)
- rc = -EPERM;
- goto put_task_struct;
- }
- for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
- rc = process_vm_rw_single_vec(
- (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
- iter, process_pages, mm, task, vm_write);
-
- total_len -= iov_iter_count(iter);
-
- if (total_len)
- rc = total_len;
- mmput(mm);
- put_task_struct:
- put_task_struct(task);
- free_proc_pages:
- if (process_pages != pp_stack)
- kfree(process_pages);
- return rc;
- }
- static ssize_t process_vm_rw(pid_t pid,
- const struct iovec __user *lvec,
- unsigned long liovcnt,
- const struct iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags, int vm_write)
- {
- struct iovec iovstack_l[UIO_FASTIOV];
- struct iovec iovstack_r[UIO_FASTIOV];
- struct iovec *iov_l = iovstack_l;
- struct iovec *iov_r = iovstack_r;
- struct iov_iter iter;
- ssize_t rc;
- int dir = vm_write ? WRITE : READ;
- if (flags != 0)
- return -EINVAL;
-
- rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
- if (rc < 0)
- return rc;
- if (!iov_iter_count(&iter))
- goto free_iovecs;
- rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
- iovstack_r, &iov_r);
- if (rc <= 0)
- goto free_iovecs;
- rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
- free_iovecs:
- if (iov_r != iovstack_r)
- kfree(iov_r);
- kfree(iov_l);
- return rc;
- }
/*
 * process_vm_readv syscall: read memory from the process identified by
 * @pid into local iovecs.  Thin wrapper; vm_write = 0 selects the read
 * direction in process_vm_rw().
 */
SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}
/*
 * process_vm_writev syscall: write local iovec data into the memory of
 * the process identified by @pid.  Thin wrapper; vm_write = 1 selects the
 * write direction in process_vm_rw().
 */
SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
- #ifdef CONFIG_COMPAT
- static ssize_t
- compat_process_vm_rw(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- unsigned long liovcnt,
- const struct compat_iovec __user *rvec,
- unsigned long riovcnt,
- unsigned long flags, int vm_write)
- {
- struct iovec iovstack_l[UIO_FASTIOV];
- struct iovec iovstack_r[UIO_FASTIOV];
- struct iovec *iov_l = iovstack_l;
- struct iovec *iov_r = iovstack_r;
- struct iov_iter iter;
- ssize_t rc = -EFAULT;
- int dir = vm_write ? WRITE : READ;
- if (flags != 0)
- return -EINVAL;
- rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
- if (rc < 0)
- return rc;
- if (!iov_iter_count(&iter))
- goto free_iovecs;
- rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
- UIO_FASTIOV, iovstack_r,
- &iov_r);
- if (rc <= 0)
- goto free_iovecs;
- rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
- free_iovecs:
- if (iov_r != iovstack_r)
- kfree(iov_r);
- kfree(iov_l);
- return rc;
- }
/*
 * Compat process_vm_readv syscall: 32-bit entry point; vm_write = 0
 * selects the read direction in compat_process_vm_rw().
 */
COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}
/*
 * Compat process_vm_writev syscall: 32-bit entry point; vm_write = 1
 * selects the write direction in compat_process_vm_rw().
 */
COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
		       const struct compat_iovec __user *, lvec,
		       compat_ulong_t, liovcnt,
		       const struct compat_iovec __user *, rvec,
		       compat_ulong_t, riovcnt,
		       compat_ulong_t, flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}
- #endif
|