Merge branches 'work.namei', 'work.dcache' and 'work.iov_iter' into for-linus

@@ -181,7 +181,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 	if (!skb)
 		return -ENOMEM;
 
-	if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+	if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
 		kfree_skb(skb);
 		return -EFAULT;
 	}
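Every driver and filesystem hunk in the iov_iter part of this merge applies the same caller-side conversion: copy_from_iter() returns the number of bytes it managed to copy, so callers compared that count against the requested length, while the new copy_from_iter_full() returns a bool and succeeds only when the whole request was satisfied. A minimal before/after sketch of the pattern (the buffer, length and iterator names are illustrative, not taken from any particular file below):

#include <linux/uio.h>

/* old style: compare the byte count returned by copy_from_iter() */
static int fill_old(void *buf, size_t len, struct iov_iter *from)
{
	if (copy_from_iter(buf, len, from) != len)
		return -EFAULT;		/* short copy has to be treated as failure */
	return 0;
}

/* new style: copy_from_iter_full() is all-or-nothing */
static int fill_new(void *buf, size_t len, struct iov_iter *from)
{
	if (!copy_from_iter_full(buf, len, from))
		return -EFAULT;		/* the iterator is not advanced on failure */
	return 0;
}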
@@ -673,7 +673,6 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	int depth;
 	bool zerocopy = false;
 	size_t linear;
-	ssize_t n;
 
 	if (q->flags & IFF_VNET_HDR) {
 		vnet_hdr_len = q->vnet_hdr_sz;
@@ -684,8 +683,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		len -= vnet_hdr_len;
 
 		err = -EFAULT;
-		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
-		if (n != sizeof(vnet_hdr))
+		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
 			goto err;
 		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -1171,7 +1171,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	bool zerocopy = false;
 	int err;
 	u32 rxhash;
-	ssize_t n;
 
 	if (!(tun->dev->flags & IFF_UP))
 		return -EIO;
@@ -1181,8 +1180,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= sizeof(pi);
 
-		n = copy_from_iter(&pi, sizeof(pi), from);
-		if (n != sizeof(pi))
+		if (!copy_from_iter_full(&pi, sizeof(pi), from))
 			return -EFAULT;
 	}
 
@@ -1191,8 +1189,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= tun->vnet_hdr_sz;
 
-		n = copy_from_iter(&gso, sizeof(gso), from);
-		if (n != sizeof(gso))
+		if (!copy_from_iter_full(&gso, sizeof(gso), from))
 			return -EFAULT;
 
 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -57,9 +57,6 @@ static void ll_release(struct dentry *de)
 
 	LASSERT(de);
 	lld = ll_d2d(de);
-	if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
-		return;
-
 	if (lld->lld_it) {
 		ll_intent_release(lld->lld_it);
 		kfree(lld->lld_it);
@@ -126,30 +123,13 @@ static int ll_ddelete(const struct dentry *de)
 	return 0;
 }
 
-int ll_d_init(struct dentry *de)
+static int ll_d_init(struct dentry *de)
 {
-	CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
-	       de, de, de->d_parent, d_inode(de), d_count(de));
-
-	if (!de->d_fsdata) {
-		struct ll_dentry_data *lld;
-
-		lld = kzalloc(sizeof(*lld), GFP_NOFS);
-		if (likely(lld)) {
-			spin_lock(&de->d_lock);
-			if (likely(!de->d_fsdata)) {
-				de->d_fsdata = lld;
-				__d_lustre_invalidate(de);
-			} else {
-				kfree(lld);
-			}
-			spin_unlock(&de->d_lock);
-		} else {
-			return -ENOMEM;
-		}
-	}
-	LASSERT(de->d_op == &ll_d_ops);
-
+	struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
+
+	if (unlikely(!lld))
+		return -ENOMEM;
+	lld->lld_invalid = 1;
+	de->d_fsdata = lld;
 	return 0;
 }
 
@@ -300,6 +280,7 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
 }
 
 const struct dentry_operations ll_d_ops = {
+	.d_init = ll_d_init,
 	.d_revalidate = ll_revalidate_nd,
 	.d_release = ll_release,
 	.d_delete = ll_ddelete,
@@ -801,7 +801,6 @@ int ll_hsm_release(struct inode *inode);
 
 /* llite/dcache.c */
 
-int ll_d_init(struct dentry *de);
 extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
@@ -1189,7 +1188,7 @@ dentry_may_statahead(struct inode *dir, struct dentry *dentry)
 	 * 'lld_sa_generation == lli->lli_sa_generation'.
 	 */
 	ldd = ll_d2d(dentry);
-	if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation)
+	if (ldd->lld_sa_generation == lli->lli_sa_generation)
 		return false;
 
 	return true;
@@ -1317,17 +1316,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
 
 static inline int d_lustre_invalid(const struct dentry *dentry)
 {
-	struct ll_dentry_data *lld = ll_d2d(dentry);
-
-	return !lld || lld->lld_invalid;
-}
-
-static inline void __d_lustre_invalidate(struct dentry *dentry)
-{
-	struct ll_dentry_data *lld = ll_d2d(dentry);
-
-	if (lld)
-		lld->lld_invalid = 1;
+	return ll_d2d(dentry)->lld_invalid;
 }
 
 /*
@@ -1343,7 +1332,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
 
 	spin_lock_nested(&dentry->d_lock,
 			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
-	__d_lustre_invalidate(dentry);
+	ll_d2d(dentry)->lld_invalid = 1;
 	/*
 	 * We should be careful about dentries created by d_obtain_alias().
 	 * These dentries are not put in the dentry tree, instead they are
@@ -169,22 +169,12 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
 	/* N.B. d_obtain_alias() drops inode ref on error */
 	result = d_obtain_alias(inode);
 	if (!IS_ERR(result)) {
-		int rc;
-
-		rc = ll_d_init(result);
-		if (rc < 0) {
-			dput(result);
-			result = ERR_PTR(rc);
-		} else {
-			struct ll_dentry_data *ldd = ll_d2d(result);
-
-			/*
-			 * Need to signal to the ll_intent_file_open that
-			 * we came from NFS and so opencache needs to be
-			 * enabled for this one
-			 */
-			ldd->lld_nfs_dentry = 1;
-		}
+		/*
+		 * Need to signal to the ll_intent_file_open that
+		 * we came from NFS and so opencache needs to be
+		 * enabled for this one
+		 */
+		ll_d2d(result)->lld_nfs_dentry = 1;
 	}
 
 	return result;
@@ -395,17 +395,9 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
  */
 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
 {
-	struct dentry *new;
-	int rc;
-
 	if (inode) {
-		new = ll_find_alias(inode, de);
+		struct dentry *new = ll_find_alias(inode, de);
 		if (new) {
-			rc = ll_d_init(new);
-			if (rc < 0) {
-				dput(new);
-				return ERR_PTR(rc);
-			}
 			d_move(new, de);
 			iput(inode);
 			CDEBUG(D_DENTRY,
@@ -414,9 +406,6 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
 			return new;
 		}
 	}
-	rc = ll_d_init(de);
-	if (rc < 0)
-		return ERR_PTR(rc);
 	d_add(de, inode);
 	CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
 	       de, d_inode(de), d_count(de), de->d_flags);
@@ -1513,9 +1513,7 @@ out_unplug:
 	 */
 	ldd = ll_d2d(*dentryp);
 	lli = ll_i2info(dir);
-	/* ldd can be NULL if llite lookup failed. */
-	if (ldd)
-		ldd->lld_sa_generation = lli->lli_sa_generation;
+	ldd->lld_sa_generation = lli->lli_sa_generation;
 	sa_put(sai, entry);
 	return rc;
 }
@@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			goto error_mutex;
 		}
 		if (!io_data->read &&
-		    copy_from_iter(data, data_len, &io_data->data) != data_len) {
+		    !copy_from_iter_full(data, data_len, &io_data->data)) {
 			ret = -EFAULT;
 			goto error_mutex;
 		}
@@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 	}
 
-	if (unlikely(copy_from_iter(buf, len, from) != len)) {
+	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 		value = -EFAULT;
 		goto out;
 	}
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
-		ret = copy_from_iter(req, req_size, &out_iter);
-		if (unlikely(ret != req_size)) {
+		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
 			vq_err(vq, "Faulted on copy_from_iter\n");
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
@@ -1862,8 +1862,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 			       i, count);
 			return -EINVAL;
 		}
-		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
-			     sizeof(desc))) {
+		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
@@ -32,40 +32,19 @@ const struct dentry_operations ceph_dentry_ops;
 /*
  * Initialize ceph dentry state.
  */
-int ceph_init_dentry(struct dentry *dentry)
+static int ceph_d_init(struct dentry *dentry)
 {
 	struct ceph_dentry_info *di;
 
-	if (dentry->d_fsdata)
-		return 0;
-
 	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;          /* oh well */
 
-	spin_lock(&dentry->d_lock);
-	if (dentry->d_fsdata) {
-		/* lost a race */
-		kmem_cache_free(ceph_dentry_cachep, di);
-		goto out_unlock;
-	}
-
-	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
-		d_set_d_op(dentry, &ceph_dentry_ops);
-	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
-		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
-	else
-		d_set_d_op(dentry, &ceph_snap_dentry_ops);
-
 	di->dentry = dentry;
 	di->lease_session = NULL;
 	di->time = jiffies;
-	/* avoid reordering d_fsdata setup so that the check above is safe */
-	smp_mb();
 	dentry->d_fsdata = di;
 	ceph_dentry_lru_add(dentry);
-out_unlock:
-	spin_unlock(&dentry->d_lock);
 	return 0;
 }
 
@@ -737,10 +716,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	err = ceph_init_dentry(dentry);
-	if (err < 0)
-		return ERR_PTR(err);
-
 	/* can we conclude ENOENT locally? */
 	if (d_really_is_negative(dentry)) {
 		struct ceph_inode_info *ci = ceph_inode(dir);
@@ -1319,16 +1294,6 @@ static void ceph_d_release(struct dentry *dentry)
 	kmem_cache_free(ceph_dentry_cachep, di);
 }
 
-static int ceph_snapdir_d_revalidate(struct dentry *dentry,
-				     unsigned int flags)
-{
-	/*
-	 * Eventually, we'll want to revalidate snapped metadata
-	 * too... probably...
-	 */
-	return 1;
-}
-
 /*
  * When the VFS prunes a dentry from the cache, we need to clear the
  * complete flag on the parent directory.
@@ -1347,6 +1312,9 @@ static void ceph_d_prune(struct dentry *dentry)
 	if (d_unhashed(dentry))
 		return;
 
+	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
+		return;
+
 	/*
 	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
 	 * cleared until d_release
@@ -1517,14 +1485,5 @@ const struct dentry_operations ceph_dentry_ops = {
 	.d_revalidate = ceph_d_revalidate,
 	.d_release = ceph_d_release,
 	.d_prune = ceph_d_prune,
-};
-
-const struct dentry_operations ceph_snapdir_dentry_ops = {
-	.d_revalidate = ceph_snapdir_d_revalidate,
-	.d_release = ceph_d_release,
-};
-
-const struct dentry_operations ceph_snap_dentry_ops = {
-	.d_release = ceph_d_release,
-	.d_prune = ceph_d_prune,
+	.d_init = ceph_d_init,
 };
@@ -62,7 +62,6 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 {
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
 	struct inode *inode;
-	struct dentry *dentry;
 	struct ceph_vino vino;
 	int err;
 
@@ -94,16 +93,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 		return ERR_PTR(-ESTALE);
 	}
 
-	dentry = d_obtain_alias(inode);
-	if (IS_ERR(dentry))
-		return dentry;
-	err = ceph_init_dentry(dentry);
-	if (err < 0) {
-		dput(dentry);
-		return ERR_PTR(err);
-	}
-	dout("__fh_to_dentry %llx %p dentry %p\n", ino, inode, dentry);
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 /*
@@ -131,7 +121,6 @@ static struct dentry *__get_parent(struct super_block *sb,
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
 	struct ceph_mds_request *req;
 	struct inode *inode;
-	struct dentry *dentry;
 	int mask;
 	int err;
 
@@ -164,18 +153,7 @@ static struct dentry *__get_parent(struct super_block *sb,
 	if (!inode)
 		return ERR_PTR(-ENOENT);
 
-	dentry = d_obtain_alias(inode);
-	if (IS_ERR(dentry))
-		return dentry;
-	err = ceph_init_dentry(dentry);
-	if (err < 0) {
-		dput(dentry);
-		return ERR_PTR(err);
-	}
-	dout("__get_parent ino %llx parent %p ino %llx.%llx\n",
-	     child ? ceph_ino(d_inode(child)) : ino,
-	     dentry, ceph_vinop(inode));
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static struct dentry *ceph_get_parent(struct dentry *child)
@@ -351,10 +351,6 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 	if (dentry->d_name.len > NAME_MAX)
 		return -ENAMETOOLONG;
 
-	err = ceph_init_dentry(dentry);
-	if (err < 0)
-		return err;
-
 	if (flags & O_CREAT) {
 		err = ceph_pre_init_acls(dir, &mode, &acls);
 		if (err < 0)
@@ -1023,16 +1023,17 @@ static void update_dentry_lease(struct dentry *dentry,
 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
 	struct inode *dir;
 
-	/* only track leases on regular dentries */
-	if (dentry->d_op != &ceph_dentry_ops)
-		return;
-
 	spin_lock(&dentry->d_lock);
 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
 	     dentry, duration, ttl);
 
 	/* make lease_rdcache_gen match directory */
 	dir = d_inode(dentry->d_parent);
+
+	/* only track leases on regular dentries */
+	if (ceph_snap(dir) != CEPH_NOSNAP)
+		goto out_unlock;
+
 	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
 
 	if (duration == 0)
@@ -1202,12 +1203,7 @@ retry_lookup:
 			err = -ENOMEM;
 			goto done;
 		}
-		err = ceph_init_dentry(dn);
-		if (err < 0) {
-			dput(dn);
-			dput(parent);
-			goto done;
-		}
+		err = 0;
 	} else if (d_really_is_positive(dn) &&
 		   (ceph_ino(d_inode(dn)) != vino.ino ||
 		    ceph_snap(d_inode(dn)) != vino.snap)) {
@@ -1561,12 +1557,6 @@ retry_lookup:
 			err = -ENOMEM;
 			goto out;
 		}
-		ret = ceph_init_dentry(dn);
-		if (ret < 0) {
-			dput(dn);
-			err = ret;
-			goto out;
-		}
 	} else if (d_really_is_positive(dn) &&
 		   (ceph_ino(d_inode(dn)) != vino.ino ||
 		    ceph_snap(d_inode(dn)) != vino.snap)) {
@@ -795,7 +795,6 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
 			root = ERR_PTR(-ENOMEM);
 			goto out;
 		}
-		ceph_init_dentry(root);
 		dout("open_root_inode success, root dentry is %p\n", root);
 	} else {
 		root = ERR_PTR(err);
@@ -879,6 +878,7 @@ static int ceph_set_super(struct super_block *s, void *data)
 	fsc->sb = s;
 
 	s->s_op = &ceph_super_ops;
+	s->s_d_op = &ceph_dentry_ops;
 	s->s_export_op = &ceph_export_ops;
 
 	s->s_time_gran = 1000;  /* 1000 ns == 1 us */
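The dcache side of the merge gives lustre and ceph the same shape: instead of every lookup path calling the filesystem's own init-dentry helper (ll_d_init()/ceph_init_dentry()) and racing over d_fsdata, the helper becomes a ->d_init() dentry operation, and the superblock's s_d_op points at the shared dentry_operations so __d_alloc() runs it once for every new dentry. A minimal sketch of that wiring for a hypothetical filesystem (all foofs_* names are illustrative, not from the diff):

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/slab.h>

struct foofs_dentry_info {
	unsigned long flags;
};

static struct kmem_cache *foofs_dentry_cachep;

/* Runs from __d_alloc(); an error here fails the dentry allocation itself. */
static int foofs_d_init(struct dentry *dentry)
{
	struct foofs_dentry_info *di;

	di = kmem_cache_zalloc(foofs_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;
	dentry->d_fsdata = di;
	return 0;
}

static void foofs_d_release(struct dentry *dentry)
{
	kmem_cache_free(foofs_dentry_cachep, dentry->d_fsdata);
}

static const struct dentry_operations foofs_dentry_ops = {
	.d_init		= foofs_d_init,
	.d_release	= foofs_d_release,
};

static int foofs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* every dentry allocated under this superblock gets ->d_init() run */
	sb->s_d_op = &foofs_dentry_ops;
	return 0;
}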
@@ -934,8 +934,7 @@ extern const struct file_operations ceph_dir_fops;
 extern const struct file_operations ceph_snapdir_fops;
 extern const struct inode_operations ceph_dir_iops;
 extern const struct inode_operations ceph_snapdir_iops;
-extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
-	ceph_snapdir_dentry_ops;
+extern const struct dentry_operations ceph_dentry_ops;
 
 extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
 extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
@@ -951,13 +950,6 @@ extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
 extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
 extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
 
-/*
- * our d_ops vary depending on whether the inode is live,
- * snapshotted (read-only), or a virtual ".snap" directory.
- */
-int ceph_init_dentry(struct dentry *dentry);
-
-
 /* ioctl.c */
 extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 
fs/namei.c (147 lines changed)
@@ -1725,30 +1725,35 @@ static int pick_link(struct nameidata *nd, struct path *link,
 	return 1;
 }
 
+enum {WALK_FOLLOW = 1, WALK_MORE = 2};
+
 /*
  * Do we need to follow links? We _really_ want to be able
  * to do this check without having to look at inode->i_op,
  * so we keep a cache of "no, this doesn't need follow_link"
  * for the common case.
  */
-static inline int should_follow_link(struct nameidata *nd, struct path *link,
-				     int follow,
-				     struct inode *inode, unsigned seq)
+static inline int step_into(struct nameidata *nd, struct path *path,
+			    int flags, struct inode *inode, unsigned seq)
 {
-	if (likely(!d_is_symlink(link->dentry)))
-		return 0;
-	if (!follow)
+	if (!(flags & WALK_MORE) && nd->depth)
+		put_link(nd);
+	if (likely(!d_is_symlink(path->dentry)) ||
+	   !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
+		/* not a symlink or should not follow */
+		path_to_nameidata(path, nd);
+		nd->inode = inode;
+		nd->seq = seq;
 		return 0;
+	}
 	/* make sure that d_is_symlink above matches inode */
 	if (nd->flags & LOOKUP_RCU) {
-		if (read_seqcount_retry(&link->dentry->d_seq, seq))
+		if (read_seqcount_retry(&path->dentry->d_seq, seq))
 			return -ECHILD;
 	}
-	return pick_link(nd, link, inode, seq);
+	return pick_link(nd, path, inode, seq);
 }
 
-enum {WALK_GET = 1, WALK_PUT = 2};
-
 static int walk_component(struct nameidata *nd, int flags)
 {
 	struct path path;
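A comment-only summary of how the old walk_component() flags map onto the new ones; the mapping itself is taken from the hunks that follow, only this restatement is editorial:

/*
 * WALK_GET  -> WALK_FOLLOW   follow the final symlink if there is one
 * WALK_PUT  -> !WALK_MORE    step_into()/walk_component() call put_link()
 *                            themselves unless WALK_MORE says another
 *                            component still follows
 *
 * link_path_walk():
 *   walk_component(nd, WALK_GET | WALK_PUT)  becomes  walk_component(nd, WALK_FOLLOW)
 *   walk_component(nd, WALK_GET)             becomes  walk_component(nd, WALK_FOLLOW | WALK_MORE)
 *
 * lookup_last() no longer builds a flag combination at all: passing 0 works
 * because step_into() itself checks nd->depth before calling put_link() and
 * nd->flags & LOOKUP_FOLLOW before deciding to follow a symlink.
 */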
@@ -1762,7 +1767,7 @@ static int walk_component(struct nameidata *nd, int flags)
 	 */
 	if (unlikely(nd->last_type != LAST_NORM)) {
 		err = handle_dots(nd, nd->last_type);
-		if (flags & WALK_PUT)
+		if (!(flags & WALK_MORE) && nd->depth)
 			put_link(nd);
 		return err;
 	}
@@ -1789,15 +1794,7 @@ static int walk_component(struct nameidata *nd, int flags)
 		inode = d_backing_inode(path.dentry);
 	}
 
-	if (flags & WALK_PUT)
-		put_link(nd);
-	err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
-	if (unlikely(err))
-		return err;
-	path_to_nameidata(&path, nd);
-	nd->inode = inode;
-	nd->seq = seq;
-	return 0;
+	return step_into(nd, &path, flags, inode, seq);
 }
 
 /*
@@ -2104,9 +2101,10 @@ OK:
 			if (!name)
 				return 0;
 			/* last component of nested symlink */
-			err = walk_component(nd, WALK_GET | WALK_PUT);
+			err = walk_component(nd, WALK_FOLLOW);
 		} else {
-			err = walk_component(nd, WALK_GET);
+			/* not the last component */
+			err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
 		}
 		if (err < 0)
 			return err;
@@ -2248,12 +2246,7 @@ static inline int lookup_last(struct nameidata *nd)
 		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 
 	nd->flags &= ~LOOKUP_PARENT;
-	return walk_component(nd,
-			nd->flags & LOOKUP_FOLLOW
-				? nd->depth
-					? WALK_PUT | WALK_GET
-					: WALK_GET
-				: 0);
+	return walk_component(nd, 0);
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
@@ -2558,28 +2551,9 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 }
 EXPORT_SYMBOL(user_path_at_empty);
 
-/*
- * NB: most callers don't do anything directly with the reference to the
- * to struct filename, but the nd->last pointer points into the name string
- * allocated by getname. So we must hold the reference to it until all
- * path-walking is complete.
- */
-static inline struct filename *
-user_path_parent(int dfd, const char __user *path,
-		 struct path *parent,
-		 struct qstr *last,
-		 int *type,
-		 unsigned int flags)
-{
-	/* only LOOKUP_REVAL is allowed in extra flags */
-	return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
-				 parent, last, type);
-}
-
 /**
  * mountpoint_last - look up last component for umount
  * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
- * @path: pointer to container for result
  *
  * This is a special lookup_last function just for umount. In this case, we
  * need to resolve the path without doing any revalidation.
@@ -2592,23 +2566,20 @@ user_path_parent(int dfd, const char __user *path,
  *
  * Returns:
  * -error: if there was an error during lookup. This includes -ENOENT if the
- *         lookup found a negative dentry. The nd->path reference will also be
- *         put in this case.
+ *         lookup found a negative dentry.
  *
- * 0:      if we successfully resolved nd->path and found it to not to be a
- *         symlink that needs to be followed. "path" will also be populated.
- *         The nd->path reference will also be put.
+ * 0:      if we successfully resolved nd->last and found it to not to be a
+ *         symlink that needs to be followed.
  *
  * 1:      if we successfully resolved nd->last and found it to be a symlink
- *         that needs to be followed. "path" will be populated with the path
- *         to the link, and nd->path will *not* be put.
+ *         that needs to be followed.
  */
 static int
-mountpoint_last(struct nameidata *nd, struct path *path)
+mountpoint_last(struct nameidata *nd)
 {
 	int error = 0;
-	struct dentry *dentry;
 	struct dentry *dir = nd->path.dentry;
+	struct path path;
 
 	/* If we're in rcuwalk, drop out of it to handle last component */
 	if (nd->flags & LOOKUP_RCU) {
@@ -2622,37 +2593,28 @@ mountpoint_last(struct nameidata *nd, struct path *path)
 		error = handle_dots(nd, nd->last_type);
 		if (error)
 			return error;
-		dentry = dget(nd->path.dentry);
+		path.dentry = dget(nd->path.dentry);
 	} else {
-		dentry = d_lookup(dir, &nd->last);
-		if (!dentry) {
+		path.dentry = d_lookup(dir, &nd->last);
+		if (!path.dentry) {
 			/*
 			 * No cached dentry. Mounted dentries are pinned in the
 			 * cache, so that means that this dentry is probably
 			 * a symlink or the path doesn't actually point
 			 * to a mounted dentry.
 			 */
-			dentry = lookup_slow(&nd->last, dir,
+			path.dentry = lookup_slow(&nd->last, dir,
					     nd->flags | LOOKUP_NO_REVAL);
-			if (IS_ERR(dentry))
-				return PTR_ERR(dentry);
+			if (IS_ERR(path.dentry))
+				return PTR_ERR(path.dentry);
 		}
 	}
-	if (d_is_negative(dentry)) {
-		dput(dentry);
+	if (d_is_negative(path.dentry)) {
+		dput(path.dentry);
 		return -ENOENT;
 	}
-	if (nd->depth)
-		put_link(nd);
-	path->dentry = dentry;
-	path->mnt = nd->path.mnt;
-	error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
-				   d_backing_inode(dentry), 0);
-	if (unlikely(error))
-		return error;
-	mntget(path->mnt);
-	follow_mount(path);
-	return 0;
+	path.mnt = nd->path.mnt;
+	return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
 }
 
 /**
@@ -2672,13 +2634,19 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
 	if (IS_ERR(s))
 		return PTR_ERR(s);
 	while (!(err = link_path_walk(s, nd)) &&
-		(err = mountpoint_last(nd, path)) > 0) {
+		(err = mountpoint_last(nd)) > 0) {
 		s = trailing_symlink(nd);
 		if (IS_ERR(s)) {
 			err = PTR_ERR(s);
 			break;
 		}
 	}
+	if (!err) {
+		*path = nd->path;
+		nd->path.mnt = NULL;
+		nd->path.dentry = NULL;
+		follow_mount(path);
+	}
 	terminate_walk(nd);
 	return err;
 }
@@ -3335,18 +3303,11 @@ static int do_last(struct nameidata *nd,
 		seq = 0;	/* out of RCU mode, so the value doesn't matter */
 		inode = d_backing_inode(path.dentry);
 finish_lookup:
-	if (nd->depth)
-		put_link(nd);
-	error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
-				   inode, seq);
+	error = step_into(nd, &path, 0, inode, seq);
 	if (unlikely(error))
 		return error;
-
-	path_to_nameidata(&path, nd);
-	nd->inode = inode;
-	nd->seq = seq;
-	/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
+	/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
 	error = complete_walk(nd);
 	if (error)
 		return error;
@@ -3861,8 +3822,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
 	int type;
 	unsigned int lookup_flags = 0;
 retry:
-	name = user_path_parent(dfd, pathname,
-				&path, &last, &type, lookup_flags);
+	name = filename_parentat(dfd, getname(pathname), lookup_flags,
+				&path, &last, &type);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
 
@@ -3991,8 +3952,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
 	struct inode *delegated_inode = NULL;
 	unsigned int lookup_flags = 0;
retry:
-	name = user_path_parent(dfd, pathname,
-				&path, &last, &type, lookup_flags);
+	name = filename_parentat(dfd, getname(pathname), lookup_flags,
				&path, &last, &type);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
 
@@ -4491,15 +4452,15 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
 		target_flags = 0;
 
 retry:
-	from = user_path_parent(olddfd, oldname,
-				&old_path, &old_last, &old_type, lookup_flags);
+	from = filename_parentat(olddfd, getname(oldname), lookup_flags,
+				&old_path, &old_last, &old_type);
 	if (IS_ERR(from)) {
 		error = PTR_ERR(from);
 		goto exit;
 	}
 
-	to = user_path_parent(newdfd, newname,
-			      &new_path, &new_last, &new_type, lookup_flags);
+	to = filename_parentat(newdfd, getname(newname), lookup_flags,
+			       &new_path, &new_last, &new_type);
 	if (IS_ERR(to)) {
 		error = PTR_ERR(to);
 		goto exit1;
@@ -203,7 +203,7 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 				  bufsize - (pos % bufsize),
 				  iov_iter_count(from));
 
-		if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
+		if (!copy_from_iter_full(bouncebuffer, to_write, from)) {
 			errno = -EFAULT;
 			break;
 		}
@@ -355,7 +355,6 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		__u64 tag;
 	} head;
 	int total = ret = iov_iter_count(iter);
-	int n;
 	int downcall_size = sizeof(struct orangefs_downcall_s);
 	int head_size = sizeof(head);
 
@@ -372,8 +371,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return -EFAULT;
 	}
 
-	n = copy_from_iter(&head, head_size, iter);
-	if (n < head_size) {
+	if (!copy_from_iter_full(&head, head_size, iter)) {
 		gossip_err("%s: failed to copy head.\n", __func__);
 		return -EFAULT;
 	}
@@ -407,8 +405,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return ret;
 	}
 
-	n = copy_from_iter(&op->downcall, downcall_size, iter);
-	if (n != downcall_size) {
+	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
 		gossip_err("%s: failed to copy downcall.\n", __func__);
 		goto Efault;
 	}
@@ -462,10 +459,8 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 			goto Enomem;
 		}
 		memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
-		n = copy_from_iter(op->downcall.trailer_buf,
-				   op->downcall.trailer_size,
-				   iter);
-		if (n != op->downcall.trailer_size) {
+		if (!copy_from_iter_full(op->downcall.trailer_buf,
+					 op->downcall.trailer_size, iter)) {
 			gossip_err("%s: failed to copy trailer.\n", __func__);
 			vfree(op->downcall.trailer_buf);
 			goto Efault;
@@ -2809,12 +2809,12 @@ static inline int skb_add_data(struct sk_buff *skb,
 
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
-					    &csum, from) == copy) {
+		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
+					         &csum, from)) {
 			skb->csum = csum_block_add(skb->csum, csum, off);
 			return 0;
 		}
-	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
+	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
 		return 0;
 
 	__skb_trim(skb, off);
@@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
@@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 }
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
@@ -1783,13 +1783,13 @@ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
 {
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
 			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, offset);
 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-		if (copy_from_iter_nocache(to, copy, from) != copy)
+		if (!copy_from_iter_full_nocache(to, copy, from))
 			return -EFAULT;
-	} else if (copy_from_iter(to, copy, from) != copy)
+	} else if (!copy_from_iter_full(to, copy, from))
 		return -EFAULT;
 
 	return 0;
@@ -20,7 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
 				      int len, int odd, struct sk_buff *skb)
 {
 	struct msghdr *msg = from;
-	return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
+	return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
 }
 
 /* Designate sk as UDP-Lite socket */
@@ -748,7 +748,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 
 	buf[len] = '\0';
-	if (copy_from_iter(buf, len, from) != len) {
+	if (!copy_from_iter_full(buf, len, from)) {
 		kfree(buf);
 		return -EFAULT;
 	}
@@ -568,6 +568,31 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter);
 
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes)) \
+		return false;
+
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+				     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full);
+
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
@@ -587,6 +612,30 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter_nocache);
 
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes)) \
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+					     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full_nocache);
+
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
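What the callers throughout this merge rely on, restated: copy_from_iter() copies as much as it can, advances the iterator by that amount and returns the count, so a short copy leaves the caller with a partially consumed iterator; copy_from_iter_full() (and the _nocache and csum variants) either copies everything and advances by the full amount, returning true, or returns false without advancing at all. A small hedged sketch of a caller that depends on this (the header struct and function are hypothetical, not from the diff):

struct foo_hdr {
	__u32 type;
	__u32 len;
};

static int foo_read_header(struct foo_hdr *hdr, struct iov_iter *from)
{
	/* either the whole header is consumed, or the iterator is untouched */
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}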
@@ -1008,7 +1057,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 	}
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
 		next = csum_and_copy_from_user(v.iov_base,
 					       (to += v.iov_len) - v.iov_len,
 					       v.iov_len, 0, &err);
 		if (!err) {
@@ -1037,6 +1086,51 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter);
 
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	sum = *csum;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (err)
+			return false;
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+		0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 			     struct iov_iter *i)
 {
@@ -1051,7 +1145,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
 		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
 					     v.iov_base,
 					     v.iov_len, 0, &err);
 		if (!err) {
 			sum = csum_block_add(sum, next, off);
@@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 		goto out;
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
-	if (copy_from_iter(skb_put(skb, size), size, &m->msg_iter) != size) {
+	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
 		kfree_skb(skb);
 		error = -EFAULT;
 		goto out;
@@ -2127,7 +2127,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 	struct sk_buff **frag;
 	int sent = 0;
 
-	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
+	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
 		return -EFAULT;
 
 	sent += count;
@@ -2147,8 +2147,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 
 		*frag = tmp;
 
-		if (copy_from_iter(skb_put(*frag, count), count,
-				   &msg->msg_iter) != count)
+		if (!copy_from_iter_full(skb_put(*frag, count), count,
+					 &msg->msg_iter))
 			return -EFAULT;
 
 		sent += count;
@@ -802,11 +802,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
 	struct msghdr *msg = from;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (copy_from_iter(to, len, &msg->msg_iter) != len)
+		if (!copy_from_iter_full(to, len, &msg->msg_iter))
 			return -EFAULT;
 	} else {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
+		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, odd);
 	}
@@ -609,15 +609,15 @@ int ping_getfrag(void *from, char *to,
 		fraglen -= sizeof(struct icmphdr);
 		if (fraglen < 0)
 			BUG();
-		if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+		if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr),
 			    fraglen, &pfh->wcheck,
-			    &pfh->msg->msg_iter) != fraglen)
+			    &pfh->msg->msg_iter))
 			return -EFAULT;
 	} else if (offset < sizeof(struct icmphdr)) {
 		BUG();
 	} else {
-		if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
-					    &pfh->msg->msg_iter) != fraglen)
+		if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
+					    &pfh->msg->msg_iter))
 			return -EFAULT;
 	}
 
@@ -2432,14 +2432,11 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
 				 struct virtio_net_hdr *vnet_hdr)
 {
-	int n;
-
 	if (*len < sizeof(*vnet_hdr))
 		return -EINVAL;
 	*len -= sizeof(*vnet_hdr);
 
-	n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
-	if (n != sizeof(*vnet_hdr))
+	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
 		return -EFAULT;
 
 	return __packet_snd_vnet_parse(vnet_hdr, *len);
@@ -268,7 +268,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		__skb_queue_tail(list, skb);
 		skb_copy_to_linear_data(skb, mhdr, mhsz);
 		pktpos = skb->data + mhsz;
-		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
+		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
 			return dsz;
 		rc = -EFAULT;
 		goto error;
@@ -299,7 +299,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		if (drem < pktrem)
 			pktrem = drem;
 
-		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
+		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
 			rc = -EFAULT;
 			goto error;
 		}
@@ -1074,7 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
 		}
 
 		ret = -EFAULT;
-		if (copy_from_iter(payload, plen, from) != plen)
+		if (!copy_from_iter_full(payload, plen, from))
 			goto error2;
 	}
 