Unverified commit e53d678d authored by Matthew Macy, committed by GitHub

Share zfs_fsync, zfs_read, zfs_write, et al between Linux and FreeBSD


The zfs_fsync, zfs_read, and zfs_write functions are almost identical
between Linux and FreeBSD. With a little refactoring they can be
moved to common code, which is what this commit does.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #11078 
parent 666aa69f
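In practice the refactor leaves each operating system with a thin wrapper that translates its native node type (a vnode_t on FreeBSD, a struct inode on Linux) into a znode_t and then calls a shared routine in the common zfs_vnops.c. The sketch below is adapted from the FreeBSD zfs_fsync that this commit removes (visible further down in the diff); it illustrates the shape of the shared entry point rather than reproducing the committed common code verbatim, and ZTOZSB() is assumed here as the platform-neutral accessor for the znode's zfsvfs.

/*
 * Sketch of the shared, znode-based zfs_fsync (adapted from the removed
 * FreeBSD version shown later in this diff; details may differ).
 */
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);	/* zp->z_zfsvfs on FreeBSD */

	/* Per-thread fsync hint consumed by the ZIL write-logging code. */
	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);	/* protects against concurrent unmount */
		ZFS_VERIFY_ZP(zp);
		/* Commit this object's outstanding intent-log records. */
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}

/*
 * The per-OS callers only do the node-type conversion, e.g. the FreeBSD
 * VOP wrapper as changed near the end of this diff:
 */
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
	vop_stdfsync(ap);
	return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}

With zfs_read and zfs_write the pattern is the same: the page-cache helpers (mappedread, mappedread_sf, update_pages) and the zn_*() macros that this diff adds give the common code OS-neutral hooks, while the chunked read and write loops themselves move out of the FreeBSD file.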
Showing with 102 additions and 624 deletions
@@ -53,4 +53,7 @@ struct opensolaris_utsname {
extern char hw_serial[11];
#define task_io_account_read(n)
#define task_io_account_write(n)
#endif /* _OPENSOLARIS_SYS_MISC_H_ */
@@ -34,6 +34,7 @@
#include <sys/vnode.h>
struct mount;
struct vattr;
struct znode;
int secpolicy_nfs(cred_t *cr);
int secpolicy_zfs(cred_t *crd);
@@ -57,7 +58,7 @@ int secpolicy_vnode_setattr(cred_t *cr, vnode_t *vp, struct vattr *vap,
int unlocked_access(void *, int, cred_t *), void *node);
int secpolicy_vnode_create_gid(cred_t *cr);
int secpolicy_vnode_setids_setgids(vnode_t *vp, cred_t *cr, gid_t gid);
int secpolicy_vnode_setid_retain(vnode_t *vp, cred_t *cr,
int secpolicy_vnode_setid_retain(struct znode *zp, cred_t *cr,
boolean_t issuidroot);
void secpolicy_setid_clear(struct vattr *vap, vnode_t *vp, cred_t *cr);
int secpolicy_setid_setsticky_clear(vnode_t *vp, struct vattr *vap,
@@ -82,6 +82,7 @@ void uioskip(uio_t *uiop, size_t n);
#define uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
#define uio_fault_disable(uio, set)
static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len)
@@ -8,7 +8,7 @@ KERNEL_H = \
zfs_dir.h \
zfs_ioctl_compat.h \
zfs_vfsops_os.h \
zfs_vnops.h \
zfs_vnops_os.h \
zfs_znode_impl.h \
zpl.h
@@ -42,6 +42,7 @@
#include <linux/types.h>
#define cond_resched() kern_yield(PRI_USER)
#define uio_prefaultpages(size, uio) (0)
#define taskq_create_sysdc(a, b, d, e, p, dc, f) \
(taskq_create(a, b, maxclsyspri, d, e, f))
@@ -26,8 +26,9 @@
* $FreeBSD$
*/
#ifndef _SYS_ZFS_VNOPS_H_
#define _SYS_ZFS_VNOPS_H_
#ifndef _SYS_FS_ZFS_VNOPS_OS_H
#define _SYS_FS_ZFS_VNOPS_OS_H
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, struct vm_page **ppa, dmu_tx_t *tx);
int dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
@@ -39,6 +39,7 @@
#include <sys/zfs_acl.h>
#include <sys/zil.h>
#include <sys/zfs_project.h>
#include <vm/vm_object.h>
#ifdef __cplusplus
extern "C" {
@@ -113,7 +114,10 @@ extern minor_t zfsdev_minor_alloc(void);
#define Z_ISBLK(type) ((type) == VBLK)
#define Z_ISCHR(type) ((type) == VCHR)
#define Z_ISLNK(type) ((type) == VLNK)
#define Z_ISDIR(type) ((type) == VDIR)
#define zn_has_cached_data(zp) vn_has_cached_data(ZTOV(zp))
#define zn_rlimit_fsize(zp, uio, td) vn_rlimit_fsize(ZTOV(zp), (uio), (td))
/* Called on entry to each ZFS vnode and vfs operation */
#define ZFS_ENTER(zfsvfs) \
@@ -175,7 +179,7 @@ extern int zfsfstype;
extern int zfs_znode_parent_and_name(struct znode *zp, struct znode **dzpp,
char *buf);
extern void zfs_inode_update(struct znode *);
#ifdef __cplusplus
}
#endif
@@ -74,6 +74,7 @@ enum scope_prefix_types {
zfs_vdev_cache,
zfs_vdev_file,
zfs_vdev_mirror,
zfs_vnops,
zfs_zevent,
zfs_zio,
zfs_zil
@@ -59,7 +59,6 @@ typedef struct uio {
boolean_t uio_fault_disable;
uint16_t uio_fmode;
uint16_t uio_extflg;
offset_t uio_limit;
ssize_t uio_resid;
size_t uio_skip;
} uio_t;
@@ -113,6 +112,7 @@ typedef struct xuio {
#define uio_iovcnt(uio) (uio)->uio_iovcnt
#define uio_iovlen(uio, idx) (uio)->uio_iov[(idx)].iov_len
#define uio_iovbase(uio, idx) (uio)->uio_iov[(idx)].iov_base
#define uio_fault_disable(uio, set) (uio)->uio_fault_disable = set
static inline void
uio_iov_at_index(uio_t *uio, uint_t idx, void **base, uint64_t *len)
@@ -21,7 +21,7 @@ KERNEL_H = \
zfs_ctldir.h \
zfs_dir.h \
zfs_vfsops_os.h \
zfs_vnops.h \
zfs_vnops_os.h \
zfs_znode_impl.h \
zpl.h
@@ -35,6 +35,8 @@
#include <sys/xvattr.h>
#include <sys/zpl.h>
struct znode;
int secpolicy_nfs(const cred_t *);
int secpolicy_sys_config(const cred_t *, boolean_t);
int secpolicy_vnode_access2(const cred_t *, struct inode *,
@@ -44,7 +46,7 @@ int secpolicy_vnode_chown(const cred_t *, uid_t);
int secpolicy_vnode_create_gid(const cred_t *);
int secpolicy_vnode_remove(const cred_t *);
int secpolicy_vnode_setdac(const cred_t *, uid_t);
int secpolicy_vnode_setid_retain(const cred_t *, boolean_t);
int secpolicy_vnode_setid_retain(struct znode *, const cred_t *, boolean_t);
int secpolicy_vnode_setids_setgids(const cred_t *, gid_t);
int secpolicy_zinject(const cred_t *);
int secpolicy_zfs(const cred_t *);
@@ -22,8 +22,8 @@
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VNOPS_H
#define _SYS_FS_ZFS_VNOPS_H
#ifndef _SYS_FS_ZFS_VNOPS_OS_H
#define _SYS_FS_ZFS_VNOPS_OS_H
#include <sys/vnode.h>
#include <sys/xvattr.h>
@@ -41,8 +41,6 @@ extern "C" {
extern int zfs_open(struct inode *ip, int mode, int flag, cred_t *cr);
extern int zfs_close(struct inode *ip, int flag, cred_t *cr);
extern int zfs_holey(struct inode *ip, int cmd, loff_t *off);
extern int zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
extern int zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr);
extern int zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *resid);
extern int zfs_access(struct inode *ip, int mode, int flag, cred_t *cr);
@@ -58,7 +56,6 @@ extern int zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap,
extern int zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd,
cred_t *cr, int flags);
extern int zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr);
extern int zfs_fsync(znode_t *zp, int syncflag, cred_t *cr);
extern int zfs_getattr_fast(struct inode *ip, struct kstat *sp);
extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr);
extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp,
@@ -72,10 +69,6 @@ extern void zfs_inactive(struct inode *ip);
extern int zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr);
extern int zfs_fid(struct inode *ip, fid_t *fidp);
extern int zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag,
cred_t *cr);
extern int zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag,
cred_t *cr);
extern int zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages);
extern int zfs_putpage(struct inode *ip, struct page *pp,
struct writeback_control *wbc);
@@ -68,6 +68,10 @@ extern "C" {
#define Z_ISCHR(type) S_ISCHR(type)
#define Z_ISLNK(type) S_ISLNK(type)
#define Z_ISDEV(type) (S_ISCHR(type) || S_ISBLK(type) || S_ISFIFO(type))
#define Z_ISDIR(type) S_ISDIR(type)
#define zn_has_cached_data(zp) ((zp)->z_is_mapped)
#define zn_rlimit_fsize(zp, uio, td) (0)
#define zhold(zp) igrab(ZTOI((zp)))
#define zrele(zp) iput(ZTOI((zp)))
@@ -143,6 +147,8 @@ do { \
} while (0)
#endif /* HAVE_INODE_TIMESPEC64_TIMES */
#define ZFS_ACCESSTIME_STAMP(zfsvfs, zp)
struct znode;
extern int zfs_sync(struct super_block *, int, cred_t *);
@@ -117,6 +117,7 @@ COMMON_H = \
zfs_stat.h \
zfs_sysfs.h \
zfs_vfsops.h \
zfs_vnops.h \
zfs_znode.h \
zil.h \
zil_impl.h \
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_VNOPS_H
#define _SYS_FS_ZFS_VNOPS_H
#include <sys/zfs_vnops_os.h>
extern int zfs_fsync(znode_t *, int, cred_t *);
extern int zfs_read(znode_t *, uio_t *, int, cred_t *);
extern int zfs_write(znode_t *, uio_t *, int, cred_t *);
extern int zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr);
extern int zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr);
extern int mappedread(znode_t *, int, uio_t *);
extern int mappedread_sf(znode_t *, int, uio_t *);
extern void update_pages(znode_t *, int64_t, int, objset_t *, uint64_t);
#endif
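These shared declarations are what the per-OS layers now call; the OS-specific prototypes stay in the zfs_vnops_os.h included above. Purely for illustration of the Linux side, which this excerpt does not show, a caller would convert its struct inode to a znode_t before calling in, roughly as below. The zpl_fsync shape shown here is an assumption based on the Linux zpl layer of that era, not part of this diff, and the real function additionally handles fstrans marking and kernel-version variants; ITOZ(), CRED(), crhold() and crfree() are existing SPL helpers.

/*
 * Hypothetical, simplified Linux-side caller of the shared zfs_fsync().
 * ITOZ() maps a struct inode to its znode; the zpl layer returns
 * negative errnos to the kernel, while the shared code returns positive.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	error = -zfs_fsync(ITOZ(inode), datasync, cr);
	crfree(cr);

	return (error);
}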
@@ -73,7 +73,6 @@ typedef struct uio {
uio_seg_t uio_segflg; /* address space (kernel or user) */
uint16_t uio_fmode; /* file mode flags */
uint16_t uio_extflg; /* extended flags */
offset_t uio_limit; /* u-limit (maximum byte offset) */
ssize_t uio_resid; /* residual count */
} uio_t;
@@ -101,9 +101,10 @@ SRCS+= nvpair.c \
#os/freebsd/spl
SRCS+= acl_common.c \
btree.c \
callb.c \
list.c \
sha256c.c \
sha512c.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
@@ -111,6 +112,7 @@ SRCS+= acl_common.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_procfs_list.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
@@ -118,11 +120,8 @@ SRCS+= acl_common.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zone.c \
sha256c.c \
sha512c.c \
spl_procfs_list.c \
spl_zlib.c
spl_zlib.c \
spl_zone.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
@@ -132,6 +131,7 @@ SRCS+= spl_atomic.c
#os/freebsd/zfs
SRCS+= abd_os.c \
arc_os.c \
crypto_os.c \
dmu_os.c \
hkdf.c \
@@ -139,17 +139,16 @@ SRCS+= abd_os.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_label_os.c \
vdev_geom.c \
vdev_label_os.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_debug.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_log.c \
zfs_replay.c \
zfs_vfsops.c \
zfs_vnops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
@@ -177,10 +176,10 @@ SRCS+= zfeature_common.c \
SRCS+= abd.c \
aggsum.c \
arc.c \
arc_os.c \
blkptr.c \
bplist.c \
bpobj.c \
btree.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
@@ -275,16 +274,18 @@ SRCS+= abd.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_debug.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_ioctl.c \
zfs_log.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_replay.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
@@ -322,7 +323,7 @@ CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.abd.c= -Wno-cast-qual
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops.c= -Wno-pointer-arith
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/jail.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
int
@@ -312,11 +313,11 @@ secpolicy_vnode_setids_setgids(vnode_t *vp, cred_t *cr, gid_t gid)
}
int
secpolicy_vnode_setid_retain(vnode_t *vp, cred_t *cr,
secpolicy_vnode_setid_retain(znode_t *zp, cred_t *cr,
boolean_t issuidroot __unused)
{
if (secpolicy_fs_owner(vp->v_mount, cr) == 0)
if (secpolicy_fs_owner(ZTOV(zp)->v_mount, cr) == 0)
return (0);
return (spl_priv_check_cred(cr, PRIV_VFS_RETAINSUGID));
}
@@ -114,6 +114,7 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, spa, CTLFLAG_RW, 0, "ZFS space allocation");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RW, 0, "ZFS TRIM");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS transaction group");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vnops, CTLFLAG_RW, 0, "ZFS VNOPS");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zevent, CTLFLAG_RW, 0, "ZFS event");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zil, CTLFLAG_RW, 0, "ZFS ZIL");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
@@ -525,16 +525,15 @@ page_unhold(vm_page_t pp)
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
static void
update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
int segflg, dmu_tx_t *tx)
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os, uint64_t oid)
{
vm_object_t obj;
struct sf_buf *sf;
vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
ASSERT(segflg != UIO_NOCOPY);
ASSERT(vp->v_mount != NULL);
obj = vp->v_object;
ASSERT(obj != NULL);
@@ -579,10 +578,10 @@ mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
* map them into contiguous KVA region and populate them
* in one single dmu_read() call.
*/
static int
mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
int
mappedread_sf(znode_t *zp, int nbytes, uio_t *uio)
{
znode_t *zp = VTOZ(vp);
vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
@@ -664,10 +663,10 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
int
mappedread(znode_t *zp, int nbytes, uio_t *uio)
{
znode_t *zp = VTOZ(vp);
vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
@@ -710,523 +709,6 @@ mappedread(vnode_t *vp, int nbytes, uio_t *uio)
return (error);
}
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
*
* IN: vp - vnode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - SYNC flags; used to provide FRSYNC semantics.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* vp - atime updated if byte count > 0
*/
/* ARGSUSED */
static int
zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ssize_t n, nbytes, start_resid;
int error = 0;
int64_t nread;
zfs_locked_range_t *lr;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* We don't copy out anything useful for directories. */
if (vp->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
/*
* Validate file offset
*/
if (uio->uio_loffset < (offset_t)0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (uio->uio_resid == 0) {
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
*/
if (zfsvfs->z_log &&
(ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, uio->uio_loffset,
uio->uio_resid, RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (uio->uio_loffset >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(uio->uio_loffset < zp->z_size);
n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
start_resid = n;
while (n > 0) {
nbytes = MIN(n, zfs_read_chunk_size -
P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
if (uio->uio_segflg == UIO_NOCOPY)
error = mappedread_sf(vp, nbytes, uio);
else if (vn_has_cached_data(vp)) {
error = mappedread(vp, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
n -= nbytes;
}
nread = start_resid - n;
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
out:
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Write the bytes to a file.
*
* IN: vp - vnode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
* set if in append mode.
* cr - credentials of caller.
* ct - caller context (NFS/CIFS fem monitor only)
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - ctime|mtime updated if byte count > 0
*/
/* ARGSUSED */
static int
zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
rlim64_t limit = MAXOFFSET_T;
ssize_t start_resid = uio->uio_resid;
ssize_t tx_bytes;
uint64_t end_size;
dmu_buf_impl_t *db;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog;
offset_t woff;
ssize_t n, nbytes;
zfs_locked_range_t *lr;
int max_blksz = zfsvfs->z_max_blksz;
int error = 0;
arc_buf_t *abuf;
iovec_t *aiov = NULL;
xuio_t *xuio = NULL;
int i_iov = 0;
int iovcnt __unused = uio->uio_iovcnt;
iovec_t *iovp = uio->uio_iov;
int write_eof;
int count = 0;
sa_bulk_attr_t bulk[4];
uint64_t mtime[2], ctime[2];
uint64_t uid, gid, projid;
int64_t nwritten;
/*
* Fasttrack empty write
*/
n = start_resid;
if (n == 0)
return (0);
if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
limit = MAXOFFSET_T;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
(uio->uio_loffset < zp->z_size))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
zilog = zfsvfs->z_log;
/*
* Validate file offset
*/
woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* If in append mode, set the io offset pointer to eof.
*/
if (ioflag & FAPPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
woff = lr->lr_offset;
if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
uio->uio_loffset = woff;
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (EFBIG);
}
if (woff >= limit) {
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG));
}
if ((woff + n) > limit || woff > (limit - n))
n = limit - woff;
/* Will this write extend the file length? */
write_eof = (woff + n > zp->z_size);
end_size = MAX(zp->z_size, woff + n);
uid = zp->z_uid;
gid = zp->z_gid;
projid = zp->z_projid;
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
woff = uio->uio_loffset;
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
abuf = NULL;
if (xuio) {
ASSERT(i_iov < iovcnt);
aiov = &iovp[i_iov];
abuf = dmu_xuio_arcbuf(xuio, i_iov);
dmu_xuio_clear(xuio, i_iov);
DTRACE_PROBE3(zfs_cp_write, int, i_iov,
iovec_t *, aiov, arc_buf_t *, abuf);
ASSERT((aiov->iov_base == abuf->b_data) ||
((char *)aiov->iov_base - (char *)abuf->b_data +
aiov->iov_len == arc_buf_size(abuf)));
i_iov++;
} else if (n >= max_blksz &&
woff >= zp->z_size &&
P2PHASE(woff, max_blksz) == 0 &&
zp->z_blksz == max_blksz) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
size_t cbytes;
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
max_blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == max_blksz);
if ((error = uiocopy(abuf->b_data, max_blksz,
UIO_WRITE, uio, &cbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT(cbytes == max_blksz);
}
/*
* Start a transaction.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
MIN(n, max_blksz));
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
/*
* If zfs_range_lock() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since zfs_range_reduce() will
* shrink down r_len to the appropriate size.
*/
if (lr->lr_length == UINT64_MAX) {
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
new_blksz = MIN(end_size,
1 << highbit64(zp->z_blksz));
} else {
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
zfs_rangelock_reduce(lr, woff, n);
}
/*
* XXX - should we really limit each write to z_max_blksz?
* Perhaps we should use SPA_MAXBLOCKSIZE chunks?
*/
nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
if (woff + nbytes > zp->z_size)
vnode_pager_setsize(vp, woff + nbytes);
if (abuf == NULL) {
tx_bytes = uio->uio_resid;
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
tx_bytes -= uio->uio_resid;
} else {
tx_bytes = nbytes;
ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
/*
* If this is not a full block write, but we are
* extending the file past EOF and this data starts
* block-aligned, use assign_arcbuf(). Otherwise,
* write via dmu_write().
*/
if (tx_bytes < max_blksz && (!write_eof ||
aiov->iov_base != abuf->b_data)) {
ASSERT(xuio);
dmu_write(zfsvfs->z_os, zp->z_id, woff,
aiov->iov_len, aiov->iov_base, tx);
dmu_return_arcbuf(abuf);
xuio_stat_wbuf_copied();
} else {
ASSERT(xuio || tx_bytes == max_blksz);
dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl), woff,
abuf, tx);
}
ASSERT(tx_bytes <= uio->uio_resid);
uioskip(uio, tx_bytes);
}
if (tx_bytes && vn_has_cached_data(vp)) {
update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
zp->z_id, uio->uio_segflg, tx);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
/*
* Clear Set-UID/Set-GID bits on successful write if not
* privileged and at least one of the execute bits is set.
*
* It would be nice to do this after all writes have
* been done, but that would still expose the ISUID/ISGID
* to another app after the partial write is committed.
*
* Note: we don't call zfs_fuid_map_id() here because
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
(S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
secpolicy_vnode_setid_retain(vp, cr,
(zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
newmode = zp->z_mode;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
(void *)&newmode, sizeof (uint64_t), tx);
}
mutex_exit(&zp->z_acl_lock);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < uio->uio_loffset) {
(void) atomic_cas_64(&zp->z_size, end_size,
uio->uio_loffset);
ASSERT(error == 0 || error == EFAULT);
}
/*
* If we are replaying and eof is non zero then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
if (error == 0)
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
else
(void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes,
ioflag, NULL, NULL);
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT(tx_bytes == nbytes);
n -= nbytes;
}
zfs_rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, return error.
* Otherwise, it's at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* EFAULT means that at least one page of the source buffer was not
* available. VFS will re-try remaining I/O upon this error.
*/
if (error == EFAULT) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (ioflag & (FSYNC | FDSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
nwritten = start_resid - uio->uio_resid;
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
ZFS_EXIT(zfsvfs);
return (0);
}
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
@@ -2704,27 +2186,6 @@ update:
return (error);
}
ulong_t zfs_fsync_sync_cnt = 4;
static int
zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zil_commit(zfsvfs->z_log, zp->z_id);
ZFS_EXIT(zfsvfs);
}
tsd_set(zfs_fsyncer_key, NULL);
return (0);
}
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
@@ -4789,45 +4250,6 @@ zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
}
}
/*ARGSUSED*/
static int
zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
zilog_t *zilog = zfsvfs->z_log;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
@@ -5221,7 +4643,7 @@ static int
zfs_freebsd_read(struct vop_read_args *ap)
{
return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
return (zfs_read(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5238,7 +4660,7 @@ static int
zfs_freebsd_write(struct vop_write_args *ap)
{
return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
return (zfs_write(VTOZ(ap->a_vp), ap->a_uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
@@ -5508,7 +4930,7 @@ zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
@@ -6374,7 +5796,8 @@ zfs_freebsd_getacl(struct vop_getacl_args *ap)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
if ((error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL)))
if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
&vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,