VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 75837

Last change on this file since 75837 was 72627, checked in by vboxsync, 6 years ago

Additions: relicense components needed for Linux shared folders to MIT.
bugref:9109: Shared folders: update to match in-kernel code more closely
This change makes the code on which the Linux kernel shared folder patch is
based MIT-licensed, so that the version in the Linux kernel can be too. This
would make it easier to move code back and forth.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.2 KB
 
/* $Id: regops.c 72627 2018-06-20 13:53:28Z vboxsync $ */
/** @file
 * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations.
 */

/*
 * Copyright (C) 2006-2018 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Limitations: only COW memory mapping is supported
 */

#include "vfsmod.h"

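/**
 * Allocate a bounce buffer for transfers between user space and the host.
 *
 * Tries the transfer size rounded up to a whole page first, capped at 16K,
 * and falls back to a single page when memory is tight.  The buffer is
 * physically contiguous (kmalloc or single page), so the physical address
 * returned in @a physp can be handed to calls like VbglR0SfWritePhysCont().
 */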
static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp,
                                 size_t xfer_size, const char *caller)
{
    size_t tmp_size;
    void *tmp;

    /* try for big first. */
    tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
    if (tmp_size > 16U * _1K)
        tmp_size = 16U * _1K;
    tmp = kmalloc(tmp_size, GFP_KERNEL);
    if (!tmp) {
        /* fall back on a page sized buffer. */
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!tmp) {
            LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu\n", caller, xfer_size));
            return NULL;
        }
        tmp_size = PAGE_SIZE;
    }

    *tmp_sizep = tmp_size;
    *physp = virt_to_phys(tmp);
    return tmp;
}

static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}

/* fops */
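/**
 * Read up to *nread bytes at offset @a pos of the host file into @a buf,
 * which must be kernel memory (kmap()ed or kmalloc()ed).  On success *nread
 * is updated to the number of bytes actually read; host failures are
 * reported as -EPROTO.
 */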
static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
                           struct sf_reg_info *sf_r, void *buf,
                           uint32_t *nread, uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
                          pos, nread, buf, false /* already locked? */);
    if (RT_FAILURE(rc)) {
        LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, rc));
        return -EPROTO;
    }
    return 0;
}

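/**
 * Write *nwritten bytes from the kernel buffer @a buf to offset @a pos of
 * the host file.  On success *nwritten is updated to the number of bytes
 * actually written; host failures are reported as -EPROTO.
 */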
static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
                            struct sf_reg_info *sf_r, void *buf,
                            uint32_t *nwritten, uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
                           pos, nwritten, buf, false /* already locked? */);
    if (RT_FAILURE(rc)) {
        LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n", caller, rc));
        return -EPROTO;
    }
    return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)

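/*
 * Pipe buffer operations for the splice-read path below.  The pages queued
 * on the pipe are fully populated before they are handed over, so most of
 * these hooks can be no-ops; releasing a buffer just frees its page.
 */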
void free_pipebuf(struct page *kpage)
{
    kunmap(kpage);
    __free_pages(kpage, 0);
}

void *sf_pipe_buf_map(struct pipe_inode_info *pipe,
                      struct pipe_buffer *pipe_buf, int atomic)
{
    return NULL;
}

void sf_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
{
}

void sf_pipe_buf_unmap(struct pipe_inode_info *pipe,
                       struct pipe_buffer *pipe_buf, void *map_data)
{
}

int sf_pipe_buf_steal(struct pipe_inode_info *pipe,
                      struct pipe_buffer *pipe_buf)
{
    return 0;
}

static void sf_pipe_buf_release(struct pipe_inode_info *pipe,
                                struct pipe_buffer *pipe_buf)
{
    free_pipebuf(pipe_buf->page);
}

int sf_pipe_buf_confirm(struct pipe_inode_info *info,
                        struct pipe_buffer *pipe_buf)
{
    return 0;
}

static struct pipe_buf_operations sf_pipe_buf_ops = {
    .can_merge = 0,
    .map = sf_pipe_buf_map,
    .unmap = sf_pipe_buf_unmap,
    .confirm = sf_pipe_buf_confirm,
    .release = sf_pipe_buf_release,
    .steal = sf_pipe_buf_steal,
    .get = sf_pipe_buf_get,
};

#define LOCK_PIPE(pipe) \
    if (pipe->inode) \
        mutex_lock(&pipe->inode->i_mutex);

#define UNLOCK_PIPE(pipe) \
    if (pipe->inode) \
        mutex_unlock(&pipe->inode->i_mutex);

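/**
 * Fallback splice-read for 2.6.23..2.6.30 kernels: reads the file contents
 * from the host one page at a time into freshly allocated pages and queues
 * each page on the pipe until the request is satisfied, the pipe is full,
 * EOF is reached or a signal arrives.
 */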
ssize_t
sf_splice_read(struct file *in, loff_t *poffset,
               struct pipe_inode_info *pipe, size_t len, unsigned int flags)
{
    size_t bytes_remaining = len;
    loff_t orig_offset = *poffset;
    loff_t offset = orig_offset;
    struct inode *inode = GET_F_DENTRY(in)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = in->private_data;
    ssize_t retval;
    struct page *kpage = NULL;
    size_t nsent = 0;

    TRACE();
    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }
    if (!len) {
        return 0;
    }

    LOCK_PIPE(pipe);

    uint32_t req_size = 0;
    while (bytes_remaining > 0) {
        kpage = alloc_page(GFP_KERNEL);
        if (unlikely(kpage == NULL)) {
            UNLOCK_PIPE(pipe);
            return -ENOMEM;
        }
        req_size = 0;
        uint32_t nread = req_size =
            (uint32_t) min(bytes_remaining, (size_t) PAGE_SIZE);
        uint32_t chunk = 0;
        void *kbuf = kmap(kpage);
        while (chunk < req_size) {
            retval = sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk,
                                     &nread, offset);
            if (retval < 0)
                goto err;
            if (nread == 0)
                break;
            chunk += nread;
            offset += nread;
            nread = req_size - chunk;
        }
        if (!pipe->readers) {
            send_sig(SIGPIPE, current, 0);
            retval = -EPIPE;
            goto err;
        }
        if (pipe->nrbufs < PIPE_BUFFERS) {
            struct pipe_buffer *pipebuf = pipe->bufs +
                ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1));
            pipebuf->page = kpage;
            pipebuf->ops = &sf_pipe_buf_ops;
            pipebuf->len = req_size;
            pipebuf->offset = 0;
            pipebuf->private = 0;
            pipebuf->flags = 0;
            pipe->nrbufs++;
            nsent += req_size;
            bytes_remaining -= req_size;
            if (signal_pending(current))
                break;
        } else { /* pipe full */
            if (flags & SPLICE_F_NONBLOCK) {
                retval = -EAGAIN;
                goto err;
            }
            free_pipebuf(kpage);
            break;
        }
    }
    UNLOCK_PIPE(pipe);
    if (!nsent && signal_pending(current))
        return -ERESTARTSYS;
    *poffset += nsent;
    return offset - orig_offset;

 err:
    UNLOCK_PIPE(pipe);
    free_pipebuf(kpage);
    return retval;
}

#endif

/**
 * Read from a regular file.
 *
 * @param file the file
 * @param buf the buffer
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of read bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_read(struct file *file, char *buf, size_t size,
                           loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_read, nread;

        to_read = tmp_size;
        if (to_read > left)
            to_read = (uint32_t) left;

        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread)) {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        if (nread != to_read)
            break;
    }

    *off += total_bytes_read;
    free_bounce_buffer(tmp);
    return total_bytes_read;

 fail:
    free_bounce_buffer(tmp);
    return err;
}

/**
 * Write to a regular file.
 *
 * @param file the file
 * @param buf the buffer
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size,
                            loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND) {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_write, nwritten;

        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write)) {
            err = -EFAULT;
            goto fail;
        }

        err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map,
                                    sf_r->handle, pos, &nwritten, tmp_phys);
        err = RT_FAILURE(err) ? -EPROTO : 0;
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

 fail:
    free_bounce_buffer(tmp);
    return err;
}

/**
 * Open a regular file.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r) {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL) {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherently true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter. */

    if (file->f_flags & O_CREAT) {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        } else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    } else {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
    }

    switch (file->f_flags & O_ACCMODE) {
    case O_RDONLY:
        params.CreateFlags |= SHFL_CF_ACCESS_READ;
        break;

    case O_WRONLY:
        params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
        break;

    case O_RDWR:
        params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
        break;

    default:
        BUG();
    }

    if (file->f_flags & O_APPEND) {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
    rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc)) {
        LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    if (SHFL_HANDLE_NIL == params.Handle) {
        switch (params.Result) {
        case SHFL_PATH_NOT_FOUND:
        case SHFL_FILE_NOT_FOUND:
            rc_linux = -ENOENT;
            break;
        case SHFL_FILE_EXISTS:
            rc_linux = -EEXIST;
            break;
        default:
            break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}

/**
 * Close a regular file.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

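/*
 * Page-not-present handler for private mappings: allocate a fresh page,
 * fill it from the host via sf_reg_read_aux() and zero-pad any tail beyond
 * EOF.  The callback signature changed repeatedly, hence the version
 * checks: a one-argument fault handler since 4.11, a two-argument one
 * since 2.6.26, and the older nopage interface before that.
 */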
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static int sf_reg_fault(struct vm_fault *vmf)
#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
                                  unsigned long vaddr, int *type)
#define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
                                  unsigned long vaddr, int unused)
#define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    struct vm_area_struct *vma = vmf->vma;
#endif
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end) {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux we cannot reliably determine
     * the physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err) {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON(nread > PAGE_SIZE);
    if (!nread) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    } else
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}

static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};

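/**
 * mmap() for regular files.  Only private (copy-on-write) mappings are
 * supported, cf. the limitation note at the top of this file; VM_SHARED
 * mappings are rejected with -EINVAL.
 */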
static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
    TRACE();
    if (vma->vm_flags & VM_SHARED) {
        LogFunc(("shared mmapping not available\n"));
        return -EINVAL;
    }

    vma->vm_ops = &sf_vma_ops;
    return 0;
}

struct file_operations sf_reg_fops = {
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
/** @todo This code is known to cause caching of data which should not be
 * cached. Investigate. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = sf_splice_read,
#else
    .sendfile = generic_file_sendfile,
#endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
#else
    .fsync = simple_sync_file,
#endif
    .llseek = generic_file_llseek,
#endif
};

struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
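/**
 * Fill a page cache page by reading from the host file; a short read is
 * zero-padded before the page is marked up to date.
 */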
static int sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret) {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}

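/**
 * Write one dirty page back to the host file; for the final, partial page
 * only the bytes up to i_size are written.
 */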
static int sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE - 1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0) {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

 out:
    kunmap(page);

    unlock_page(page);
    return err;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
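/** Prepare a buffered write by delegating to simple_write_begin(). */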
int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
                   unsigned len, unsigned flags, struct page **pagep,
                   void **fsdata)
{
    TRACE();

    return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

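/**
 * Complete a buffered write: push the bytes that sf_write_begin() placed in
 * the page through to the host and update i_size.
 */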
int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
                 unsigned len, unsigned copied, struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    void *buf;
    unsigned from = pos & (PAGE_SIZE - 1);
    uint32_t nwritten = len;
    int err;

    TRACE();

    buf = kmap(page);
    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
    kunmap(page);

    /* sf_reg_write_aux() returns 0 on success, so test the byte count to
     * decide whether the whole page is now known to match the host file. */
    if (!PageUptodate(page) && err == 0 && nwritten == PAGE_SIZE)
        SetPageUptodate(page);

    if (err >= 0) {
        pos += nwritten;
        if (pos > inode->i_size)
            inode->i_size = pos;
    }

    unlock_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
    put_page(page);
#else
    page_cache_release(page);
#endif

    return nwritten;
}

#endif /* KERNEL_VERSION >= 2.6.24 */

struct address_space_operations sf_reg_aops = {
    .readpage = sf_readpage,
    .writepage = sf_writepage,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
#else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
#endif
};
#endif
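
For orientation, here is a minimal user-space sketch (not part of the file above) showing how these handlers get exercised from a guest. The mount point /mnt/shared and the file name are assumptions; any file on a mounted vboxsf share will do. open() lands in sf_reg_open(), read() in sf_reg_read(), and each page of the MAP_PRIVATE mapping is populated on demand by sf_reg_fault()/sf_reg_nopage().

/* Sketch only: assumes a vboxsf share mounted at the hypothetical
 * path /mnt/shared containing example.txt. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int fd = open("/mnt/shared/example.txt", O_RDONLY); /* sf_reg_open() */
    if (fd < 0) {
        perror("open");
        return EXIT_FAILURE;
    }

    ssize_t n = read(fd, buf, sizeof(buf));             /* sf_reg_read() */
    if (n > 0)
        printf("read %zd bytes\n", n);

    /* Private (COW) mapping: the only kind sf_reg_mmap() accepts;
     * MAP_SHARED would fail with EINVAL. */
    void *map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
    if (map != MAP_FAILED) {
        /* First access faults the page in via sf_reg_fault(). */
        printf("first byte via mmap: %c\n", *(char *)map);
        munmap(map, 4096);
    }

    close(fd);                                          /* sf_reg_release() */
    return EXIT_SUCCESS;
}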