/* mimiker/sys/kern/uio.c (revision ec667fa8) */
#include <sys/klog.h>
#include <sys/uio.h>
#include <sys/libkern.h>
#include <sys/vm_map.h>
#include <sys/malloc.h>
#include <sys/errno.h>

/* Copy len bytes from udaddr in the vm map vm into the kernel buffer kaddr.
 * Only the kernel map and the currently active user map are supported. */
static int copyin_vmspace(vm_map_t *vm, const void *restrict udaddr,
                          void *restrict kaddr, size_t len) {
  if (vm == vm_map_kernel()) {
    memcpy(kaddr, udaddr, len);
    return 0;
  }

  if (vm == vm_map_user())
    return copyin(udaddr, kaddr, len);

  panic("copyin on non-active vm maps is not supported");
}

/* Copy len bytes from the kernel buffer kaddr to udaddr in the vm map vm.
 * Only the kernel map and the currently active user map are supported. */
static int copyout_vmspace(vm_map_t *vm, const void *restrict kaddr,
                           void *restrict udaddr, size_t len) {
  if (vm == vm_map_kernel()) {
    memcpy(udaddr, kaddr, len);
    return 0;
  }

  if (vm == vm_map_user())
    return copyout(kaddr, udaddr, len);

  panic("copyout on non-active vm maps is not supported");
}

/* Heavily inspired by NetBSD's uiomove. This function modifies uio to
 * reflect the progress of the transfer. */
int uiomove(void *buf, size_t n, uio_t *uio) {
  /* Calling uiomove from a critical section (interrupts or preemption
   * disabled) is not allowed, since it may copy from pageable memory. */
  assert(!intr_disabled() && !preempt_disabled());

  char *cbuf = buf;
  int error = 0;

  assert(uio->uio_op == UIO_READ || uio->uio_op == UIO_WRITE);

  while (n > 0 && uio->uio_resid > 0) {
    /* Take the current I/O vector. */
    iovec_t *iov = uio->uio_iov;
    size_t cnt = iov->iov_len - uio->uio_iovoff;

    if (cnt == 0) {
      /* No data left to move in this vector: proceed to the next one,
       * or finish moving data if this was the last vector. */
      if (uio->uio_iovcnt == 0)
        break;
      uio->uio_iov++;
      uio->uio_iovcnt--;
      uio->uio_iovoff = 0;
      continue;
    }
    if (cnt > n)
      cnt = n;
    char *base = iov->iov_base + uio->uio_iovoff;
    /* UIO_READ moves data out to the uio's buffers (copyout),
     * UIO_WRITE moves data in from them (copyin). */
    if (uio->uio_op == UIO_READ)
      error = copyout_vmspace(uio->uio_vmspace, cbuf, base, cnt);
    else
      error = copyin_vmspace(uio->uio_vmspace, base, cbuf, cnt);
    /* Exit immediately if there was a problem with moving data. */
    if (error)
      break;

    uio->uio_iovoff += cnt;
    uio->uio_resid -= cnt;
    uio->uio_offset += cnt;
    cbuf += cnt;
    n -= cnt;
  }

  /* Return the first error encountered, if any; uio reflects the amount
   * of data that was actually moved. */
  return error;
}
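
/* A minimal usage sketch (hypothetical, not part of this file): a
 * character-device read routine might serve data from a kernel buffer with
 * uiomove. The name `hello_read' and the message are assumptions; with
 * UIO_READ the bytes flow from the kernel buffer into the uio's I/O vectors.
 *
 *   static int hello_read(uio_t *uio) {
 *     static char msg[] = "hello\n";
 *     size_t off = uio->uio_offset;
 *     if (off >= sizeof(msg) - 1)
 *       return 0;                          (EOF: nothing left to transfer)
 *     return uiomove(msg + off, sizeof(msg) - 1 - off, uio);
 *   }
 */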

/* Record the uio's current progress so that a partially completed transfer
 * can later be rolled back with uio_restore. */
void uio_save(const uio_t *uio, uiostate_t *save) {
  save->us_resid = uio->uio_resid;
  save->us_iovcnt = uio->uio_iovcnt;
  save->us_iovoff = uio->uio_iovoff;
}

/* Rewind the uio to the state captured by uio_save, undoing any progress
 * made since then. */
void uio_restore(uio_t *uio, const uiostate_t *save) {
  size_t nbytes = save->us_resid - uio->uio_resid;
  uio->uio_resid += nbytes;
  uio->uio_offset -= nbytes;
  uio->uio_iovoff = save->us_iovoff;
  uio->uio_iov -= save->us_iovcnt - uio->uio_iovcnt;
  uio->uio_iovcnt = save->us_iovcnt;
}
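
/* A minimal sketch (hypothetical, not part of this file) of the save/restore
 * pattern: attempt a transfer and roll the uio back if a later step fails,
 * so the caller observes no partial progress. `consume' stands for any
 * fallible operation on the moved data.
 *
 *   uiostate_t saved;
 *   uio_save(uio, &saved);
 *   int error = uiomove(buf, len, uio);
 *   if (!error)
 *     error = consume(buf, saved.us_resid - uio->uio_resid);
 *   if (error)
 *     uio_restore(uio, &saved);
 */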

/* Serve a transfer from the fixed kernel buffer buf of size buflen,
 * starting at the uio's current offset. */
int uiomove_frombuf(void *buf, size_t buflen, struct uio *uio) {
  assert(uio->uio_offset >= 0);
  size_t offset = uio->uio_offset;
  assert(offset <= buflen);

  return uiomove((char *)buf + offset, buflen - offset, uio);
}
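
/* A minimal sketch (hypothetical): uiomove_frombuf suits read routines that
 * expose a complete in-memory object. An offset at the end of the buffer
 * yields a zero-byte transfer (EOF); an offset past the end is a caller bug
 * caught by the assertion.
 *
 *   static int version_read(uio_t *uio) {
 *     static char version[] = "mimiker\n";          (illustrative contents)
 *     return uiomove_frombuf(version, sizeof(version) - 1, uio);
 *   }
 */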

/* Compute the total length of the iovcnt buffers described by iov,
 * failing with EINVAL if the sum does not fit in ssize_t. */
int iovec_length(const iovec_t *iov, int iovcnt, size_t *lengthp) {
  size_t len = 0;
  for (int i = 0; i < iovcnt; i++) {
    len += iov[i].iov_len;
    /* Ensure that the total data size fits in ssize_t. */
    if (len > SSIZE_MAX || iov[i].iov_len > SSIZE_MAX)
      return EINVAL;
  }
  *lengthp = len;
  return 0;
}
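
/* A minimal sketch (hypothetical, not part of this file) of how a
 * readv/writev-style path might use iovec_length to validate a user-supplied
 * iovec array before setting up a uio; the field assignments mirror the
 * fields this file manipulates.
 *
 *   size_t len;
 *   int error = iovec_length(iov, iovcnt, &len);
 *   if (error)
 *     return error;                    (total exceeds SSIZE_MAX)
 *   uio.uio_iov = iov;
 *   uio.uio_iovcnt = iovcnt;
 *   uio.uio_iovoff = 0;
 *   uio.uio_resid = len;
 */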