static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
- /* only init_inode_xattrs use non-atomic once */
+ /* the only user of kunmap() is 'init_inode_xattrs' */
if (unlikely(!atomic))
kunmap(it->page);
else
kunmap_atomic(it->kaddr);
+
unlock_page(it->page);
put_page(it->page);
}
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
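+ /* ->page is NULL if the iterator was already ended by a failed fixup */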
+ if (!it->page)
+ return;
+
+ xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
{
struct xattr_iter it;
unsigned i;
bool atomic_map;
if (likely(inode_has_inited_xattr(inode)))
- return;
+ return 0;
vi = EROFS_V(inode);
BUG_ON(!vi->xattr_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it.page = erofs_get_inline_page(inode, it.blkaddr);
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page))
+ return PTR_ERR(it.page);
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
atomic_map = false;
ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
- vi->xattr_shared_count, sizeof(unsigned),
- GFP_KERNEL | __GFP_NOFAIL);
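+ /* __GFP_NOFAIL is dropped, so the allocation failure is handled below */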
+ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+ sizeof(uint), GFP_KERNEL);
+ if (!vi->xattr_shared_xattrs) {
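+ /* the inline page was mapped with kmap(), so atomic_map is still false here */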
+ xattr_iter_end(&it, atomic_map);
+ return -ENOMEM;
+ }
/* let's skip ibody header */
it.ofs += sizeof(struct erofs_xattr_ibody_header);
it.page = erofs_get_meta_page(inode->i_sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ return PTR_ERR(it.page);
+ }
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
xattr_iter_end(&it, atomic_map);
inode_set_inited_xattr(inode);
+ return 0;
}
struct xattr_iter_handlers {
void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
};
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
{
- if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
- xattr_iter_end(it, true);
+ if (it->ofs < EROFS_BLKSIZ)
+ return 0;
- it->blkaddr += erofs_blknr(it->ofs);
- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
- BUG_ON(IS_ERR(it->page));
+ xattr_iter_end(it, true);
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
+ it->blkaddr += erofs_blknr(it->ofs);
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+ if (IS_ERR(it->page)) {
+ int err = PTR_ERR(it->page);
+
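+ /* don't leave the ERR_PTR in ->page for xattr_iter_end_final() */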
+ it->page = NULL;
+ return err;
}
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ return 0;
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
it->page = erofs_get_inline_page(inode, it->blkaddr);
- BUG_ON(IS_ERR(it->page));
- it->kaddr = kmap_atomic(it->page);
+ if (IS_ERR(it->page))
+ return PTR_ERR(it->page);
+ it->kaddr = kmap_atomic(it->page);
return vi->xattr_isize - xattr_header_sz;
}
static int xattr_foreach(struct xattr_iter *it,
- struct xattr_iter_handlers *op, unsigned *tlimit)
+ const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
struct erofs_xattr_entry entry;
unsigned value_sz, processed, slice;
int err;
/* 0. fixup blkaddr, ofs, ipage */
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ return err;
/*
* 1. read xattr entry to the memory,
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
while (processed < value_sz) {
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
memcpy(it->buffer + processed, buf, len);
}
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
.entry = xattr_entrymatch,
.name = xattr_namematch,
.alloc_buffer = xattr_checkbuffer,
ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
if (ret >= 0)
break;
+
+ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
+ break;
}
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_size;
}
xattr_iter_end(&it->it, true);
it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
if (ret >= 0)
break;
+
+ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
+ break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_size;
}
if (unlikely(name == NULL))
return -EINVAL;
- init_inode_xattrs(inode);
+ ret = init_inode_xattrs(inode);
+ if (ret)
+ return ret;
it.index = index;
return 1;
}
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
.entry = xattr_entrylist,
.name = xattr_namelist,
.alloc_buffer = xattr_skipvalue,
if (ret < 0)
break;
}
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_ofs;
}
xattr_iter_end(&it->it, true);
it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_ofs;
}
int ret;
struct listxattr_iter it;
- init_inode_xattrs(d_inode(dentry));
+ ret = init_inode_xattrs(d_inode(dentry));
+ if (ret)
+ return ret;
it.dentry = dentry;
it.buffer = buffer;