From: Gao Xiang <hsiangkao@linux.alibaba.com>
anolis inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB5UKT
Reference: https://gitee.com/anolis/cloud-kernel/commit/c5305cc0a9c206b10825c1b00b6f84c...
--------------------------------
ANBZ: #1666
commit c5aa903a59db274554718cddfda9039913409ec9 upstream.
Add runtime support for chunk-based uncompressed files described in the previous patch.
Link: https://lore.kernel.org/r/20210820100019.208490-2-hsiangkao@linux.alibaba.co...
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Signed-off-by: Huang Jianan <jnhuang@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
Signed-off-by: Baokun Li <libaokun1@huawei.com>
---
 fs/erofs/data.c     | 92 +++++++++++++++++++++++++++++++++++++++------
 fs/erofs/inode.c    | 18 ++++++++-
 fs/erofs/internal.h |  5 +++
 3 files changed, 103 insertions(+), 12 deletions(-)
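A note on the new lookup, for readers of this backport: the heart of the new
erofs_map_blocks() below is the chunk arithmetic. The logical offset picks a
chunk via m_la >> chunkbits, and that chunk's on-disk entry sits right after
the inode (plus xattrs), aligned to the entry size and indexed by chunk
number. The standalone userspace sketch below reproduces only that
computation; it is not part of the patch, and every layout value in it is a
made-up example (the EX_* macros and local names are illustrative, not the
kernel's).

/*
 * Sketch only (not part of the patch): mirrors the chunknr/pos computation
 * of the new erofs_map_blocks(); it does not read a real EROFS image, and
 * all layout values below are hypothetical examples.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BLKSIZ                 4096u  /* like EROFS_BLKSIZ on a 4KiB-block image */
#define EX_BLOCK_MAP_ENTRY_SIZE   4u     /* one __le32 blkaddr per chunk */

/* same rounding as the kernel's ALIGN() for power-of-two alignments */
static uint64_t align_up(uint64_t x, uint64_t a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        uint64_t iloc = 8192;           /* byte offset of the on-disk inode (example) */
        uint32_t inode_isize = 64;      /* extended on-disk inode size (example) */
        uint32_t xattr_isize = 0;       /* no xattrs in this example */
        unsigned int chunkbits = 20;    /* 1 MiB chunks: LOG_BLOCK_SIZE + blkbits */
        unsigned int unit = EX_BLOCK_MAP_ENTRY_SIZE;    /* block-map format */
        uint64_t m_la = (3u << 20) + 12345;     /* logical offset to map (example) */

        /* which chunk the logical offset falls into */
        uint64_t chunknr = m_la >> chunkbits;

        /*
         * byte position of that chunk's entry: the map starts right after the
         * inode (+ xattrs), aligned to the entry size, then indexed by chunknr
         */
        uint64_t pos = align_up(iloc + inode_isize + xattr_isize, unit) +
                       (uint64_t)unit * chunknr;

        printf("chunknr=%llu entry at byte %llu (block %llu, offset %llu)\n",
               (unsigned long long)chunknr, (unsigned long long)pos,
               (unsigned long long)(pos / EX_BLKSIZ),
               (unsigned long long)(pos % EX_BLKSIZ));
        return 0;
}

With the plain block map each chunk costs a single __le32 block address;
EROFS_CHUNK_FORMAT_INDEXES entries are larger (struct erofs_inode_chunk_index)
and additionally carry a device_id, which this patch still requires to be zero.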
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 60f74fad069b..deed631dad0e 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #include "internal.h"
 #include <linux/prefetch.h>
@@ -59,13 +60,6 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
         nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
         lastblk = nblocks - tailendpacking;
 
-        if (offset >= inode->i_size) {
-                /* leave out-of-bound access unmapped */
-                map->m_flags = 0;
-                map->m_plen = 0;
-                goto out;
-        }
-
         /* there is no hole in flatmode */
         map->m_flags = EROFS_MAP_MAPPED;
 
@@ -100,14 +94,90 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
                 goto err_out;
         }
 
-out:
         map->m_llen = map->m_plen;
-
 err_out:
         trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
         return err;
 }
 
+static int erofs_map_blocks(struct inode *inode,
+                            struct erofs_map_blocks *map, int flags)
+{
+        struct super_block *sb = inode->i_sb;
+        struct erofs_inode *vi = EROFS_I(inode);
+        struct erofs_inode_chunk_index *idx;
+        struct page *page;
+        u64 chunknr;
+        unsigned int unit;
+        erofs_off_t pos;
+        int err = 0;
+
+        if (map->m_la >= inode->i_size) {
+                /* leave out-of-bound access unmapped */
+                map->m_flags = 0;
+                map->m_plen = 0;
+                goto out;
+        }
+
+        if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
+                return erofs_map_blocks_flatmode(inode, map, flags);
+
+        if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
+                unit = sizeof(*idx);                    /* chunk index */
+        else
+                unit = EROFS_BLOCK_MAP_ENTRY_SIZE;      /* block map */
+
+        chunknr = map->m_la >> vi->chunkbits;
+        pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+                    vi->xattr_isize, unit) + unit * chunknr;
+
+        page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
+        if (IS_ERR(page))
+                return PTR_ERR(page);
+
+        map->m_la = chunknr << vi->chunkbits;
+        map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+                            roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
+
+        /* handle block map */
+        if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
+                __le32 *blkaddr = page_address(page) + erofs_blkoff(pos);
+
+                if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
+                        map->m_flags = 0;
+                } else {
+                        map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
+                        map->m_flags = EROFS_MAP_MAPPED;
+                }
+                goto out_unlock;
+        }
+        /* parse chunk indexes */
+        idx = page_address(page) + erofs_blkoff(pos);
+        switch (le32_to_cpu(idx->blkaddr)) {
+        case EROFS_NULL_ADDR:
+                map->m_flags = 0;
+                break;
+        default:
+                /* only one device is supported for now */
+                if (idx->device_id) {
+                        erofs_err(sb, "invalid device id %u @ %llu for nid %llu",
+                                  le16_to_cpu(idx->device_id),
+                                  chunknr, vi->nid);
+                        err = -EFSCORRUPTED;
+                        goto out_unlock;
+                }
+                map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
+                map->m_flags = EROFS_MAP_MAPPED;
+                break;
+        }
+out_unlock:
+        unlock_page(page);
+        put_page(page);
+out:
+        map->m_llen = map->m_plen;
+        return err;
+}
+
 static inline struct bio *erofs_read_raw_page(struct bio *bio,
                                               struct address_space *mapping,
                                               struct page *page,
@@ -143,7 +213,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
         erofs_blk_t blknr;
         unsigned int blkoff;
 
-                err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
+                err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
                 if (err)
                         goto err_out;
 
@@ -310,7 +380,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
                         return 0;
         }
 
-        if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
+        if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW))
                 return erofs_blknr(map.m_pa);
 
         return 0;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 5f9c6fb79ac0..ec2f7aac1085 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #include "xattr.h"
 
@@ -122,7 +123,9 @@ static struct page *erofs_read_inode(struct inode *inode,
                 /* total blocks for compressed files */
                 if (erofs_inode_is_data_compressed(vi->datalayout))
                         nblks = le32_to_cpu(die->i_u.compressed_blocks);
-
+                else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
+                        /* fill chunked inode summary info */
+                        vi->chunkformat = le16_to_cpu(die->i_u.c.format);
                 kfree(copied);
                 break;
         case EROFS_INODE_LAYOUT_COMPACT:
@@ -160,6 +163,8 @@ static struct page *erofs_read_inode(struct inode *inode,
                 inode->i_size = le32_to_cpu(dic->i_size);
                 if (erofs_inode_is_data_compressed(vi->datalayout))
                         nblks = le32_to_cpu(dic->i_u.compressed_blocks);
+                else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
+                        vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
                 break;
         default:
                 erofs_err(inode->i_sb,
@@ -169,6 +174,17 @@ static struct page *erofs_read_inode(struct inode *inode,
                 goto err_out;
         }
 
+        if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
+                if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
+                        erofs_err(inode->i_sb,
+                                  "unsupported chunk format %x of nid %llu",
+                                  vi->chunkformat, vi->nid);
+                        err = -EOPNOTSUPP;
+                        goto err_out;
+                }
+                vi->chunkbits = LOG_BLOCK_SIZE +
+                        (vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
+        }
         inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
         inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
         inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 4ab73f7263c8..83d6c01575f8 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #ifndef __EROFS_INTERNAL_H
 #define __EROFS_INTERNAL_H
@@ -209,6 +210,10 @@ struct erofs_inode {
 
         union {
                 erofs_blk_t raw_blkaddr;
+                struct {
+                        unsigned short chunkformat;
+                        unsigned char chunkbits;
+                };
 #ifdef CONFIG_EROFS_FS_ZIP
                 struct {
                         unsigned short z_advise;