                       by default if CONFIG_EROFS_FS_XATTR is selected.
(no)acl                Set up POSIX Access Control Lists. Note: acl is enabled
                       by default if CONFIG_EROFS_FS_POSIX_ACL is selected.
+cache_strategy=%s      Select a strategy for cached decompression from now on:
+                         disabled: In-place I/O decompression only;
+                        readahead: Cache the last incomplete compressed physical
+                                   cluster for further reading. It still does
+                                   in-place I/O decompression for the remaining
+                                   compressed physical clusters;
+                       readaround: Cache both ends of incomplete compressed
+                                   physical clusters for further reading.
+                                   It still does in-place I/O decompression
+                                   for the remaining compressed physical
+                                   clusters.
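+
+For example, assuming an EROFS image backs the loop device /dev/loop0
+(the device and mountpoint here are hypothetical), a strategy can be
+selected at mount time, and the active one is then visible through
+/proc/mounts:
+
+        mount -t erofs -o cache_strategy=readahead /dev/loop0 /mnt/erofs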
Module parameters
=================
than 2. Otherwise, the image cannot be mounted
correctly on this kernel.
-choice
- prompt "EROFS VLE Data Decompression mode"
- depends on EROFS_FS_ZIP
- default EROFS_FS_ZIP_CACHE_BIPOLAR
- help
- EROFS supports three options for VLE decompression.
- "In-place Decompression Only" consumes the minimum memory
- with lowest random read.
-
- "Bipolar Cached Decompression" consumes the maximum memory
- with highest random read.
-
- If unsure, select "Bipolar Cached Decompression"
-
-config EROFS_FS_ZIP_NO_CACHE
- bool "In-place Decompression Only"
- help
- Read compressed data into page cache and do in-place
- decompression directly.
-
-config EROFS_FS_ZIP_CACHE_UNIPOLAR
- bool "Unipolar Cached Decompression"
- help
- For each request, it caches the last compressed page
- for further reading.
- It still decompresses in place for the rest compressed pages.
-
-config EROFS_FS_ZIP_CACHE_BIPOLAR
- bool "Bipolar Cached Decompression"
- help
- For each request, it caches the both end compressed pages
- for further reading.
- It still decompresses in place for the rest compressed pages.
-
- Recommended for performance priority.
-
-endchoice
-
};
#endif /* CONFIG_EROFS_FAULT_INJECTION */
-#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
-#define EROFS_FS_ZIP_CACHE_LVL (2)
-#elif defined(EROFS_FS_ZIP_CACHE_UNIPOLAR)
-#define EROFS_FS_ZIP_CACHE_LVL (1)
-#else
-#define EROFS_FS_ZIP_CACHE_LVL (0)
-#endif
-
-#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
-#define EROFS_FS_HAS_MANAGED_CACHE
-#endif
-
/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
unsigned int shrinker_run_no;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct inode *managed_cache;
-#endif
+ /* current strategy of how to use managed cache */
+ unsigned char cache_strategy;
+ /* pseudo inode to manage cached pages */
+ struct inode *managed_cache;
#endif /* CONFIG_EROFS_FS_ZIP */
u32 blocks;
u32 meta_blkaddr;
#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
#ifdef CONFIG_EROFS_FS_ZIP
+/* cached decompression strategies; the values are ordered for range checks */
+enum {
+	EROFS_ZIP_CACHE_DISABLED,	/* in-place I/O decompression only */
+	EROFS_ZIP_CACHE_READAHEAD,	/* cache the last incomplete compressed pcluster */
+	EROFS_ZIP_CACHE_READAROUND	/* cache both ends of incomplete compressed pclusters */
+};
+
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
/* basic unit of the workstation of a super_block */
}
#endif
+#ifdef CONFIG_EROFS_FS_ZIP
+static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
+ substring_t *args)
+{
+ const char *cs = match_strdup(args);
+ int err = 0;
+
+ if (!cs) {
+ errln("Not enough memory to store cache strategy");
+ return -ENOMEM;
+ }
+
+ if (!strcmp(cs, "disabled")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
+ } else if (!strcmp(cs, "readahead")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
+ } else if (!strcmp(cs, "readaround")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ } else {
+ errln("Unrecognized cache strategy \"%s\"", cs);
+ err = -EINVAL;
+ }
+ kfree(cs);
+ return err;
+}
+#else
+static int erofs_build_cache_strategy(struct erofs_sb_info *sbi,
+ substring_t *args)
+{
+ infoln("EROFS compression is disabled, so cache strategy is ignored");
+ return 0;
+}
+#endif
+
+/* set up default EROFS parameters */
static void default_options(struct erofs_sb_info *sbi)
{
- /* set up some FS parameters */
#ifdef CONFIG_EROFS_FS_ZIP
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
sbi->max_sync_decompress_pages = 3;
#endif
-
#ifdef CONFIG_EROFS_FS_XATTR
set_opt(sbi, XATTR_USER);
#endif
-
#ifdef CONFIG_EROFS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
Opt_acl,
Opt_noacl,
Opt_fault_injection,
+ Opt_cache_strategy,
Opt_err
};
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_fault_injection, "fault_injection=%u"},
+ {Opt_cache_strategy, "cache_strategy=%s"},
{Opt_err, NULL}
};
if (err)
return err;
break;
-
+ case Opt_cache_strategy:
+ err = erofs_build_cache_strategy(EROFS_SB(sb), args);
+ if (err)
+ return err;
+ break;
default:
errln("Unrecognized mount option \"%s\" "
"or missing value", p);
return 0;
}
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-
+#ifdef CONFIG_EROFS_FS_ZIP
static const struct address_space_operations managed_cache_aops;
static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
DBG_BUGON(!sbi);
erofs_shrinker_unregister(sb);
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
+#ifdef CONFIG_EROFS_FS_ZIP
iput(sbi->managed_cache);
sbi->managed_cache = NULL;
#endif
if (test_opt(sbi, FAULT_INJECTION))
seq_printf(seq, ",fault_injection=%u",
erofs_get_fault_rate(sbi));
+#ifdef CONFIG_EROFS_FS_ZIP
+	switch (sbi->cache_strategy) {
+	case EROFS_ZIP_CACHE_DISABLED:
+		seq_puts(seq, ",cache_strategy=disabled");
+		break;
+	case EROFS_ZIP_CACHE_READAHEAD:
+		seq_puts(seq, ",cache_strategy=readahead");
+		break;
+	case EROFS_ZIP_CACHE_READAROUND:
+		seq_puts(seq, ",cache_strategy=readaround");
+		break;
+	default:
+		seq_puts(seq, ",cache_strategy=(unknown)");
+		DBG_BUGON(1);
+		break;
+	}
+#endif
return 0;
}
return count;
}
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-/* for cache-managed case, customized reclaim paths exist */
static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
{
erofs_workgroup_unfreeze(grp, 0);
return true;
}
-#else
-/* for nocache case, no customized reclaim path at all */
-static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
-{
- int cnt = atomic_read(&grp->refcount);
-
- DBG_BUGON(cnt <= 0);
- DBG_BUGON(cleanup && cnt != 1);
-
- if (cnt > 1)
- return false;
-
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
-
- /* (rarely) could be grabbed again when freeing */
- erofs_workgroup_put(grp);
- return true;
-}
-
-#endif
-
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned long nr_shrink,
bool cleanup)
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock);
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void preload_compressed_pages(struct z_erofs_collector *clt,
struct address_space *mc,
enum z_erofs_cache_alloctype type,
}
return ret;
}
-#else
-static void preload_compressed_pages(struct z_erofs_collector *clt,
- struct address_space *mc,
- enum z_erofs_cache_alloctype type,
- struct list_head *pagepool)
-{
- /* nowhere to load compressed pages from */
-}
-#endif
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
static inline bool try_inplace_io(struct z_erofs_collector *clt,
return page;
}
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
+ unsigned int cachestrategy,
erofs_off_t la)
{
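+	/* no compressed pages are cached if cached decompression is disabled */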
+ if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+ return false;
+
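+	/* the backmost pcluster is cached by both readahead and readaround */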
if (fe->backmost)
return true;
- if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- return la < fe->headoffset;
-
- return false;
-}
-#else
-static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
- erofs_off_t la)
-{
- return false;
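+	/* readaround also caches pclusters in front of the current request */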
+ return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+ la < fe->headoffset;
}
-#endif
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct page *page,
goto err_out;
/* preload all compressed pages (maybe downgrade role if necessary) */
- if (should_alloc_managed_pages(fe, map->m_la))
+ if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
cache_strategy = DELAYEDALLOC;
else
cache_strategy = DONTALLOC;
/* define decompression jobqueue types */
enum {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
JQ_BYPASS,
-#endif
JQ_SUBMIT,
NR_JOBQUEUES,
};
struct z_erofs_unzip_io *fgq,
bool forcefg)
{
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
/*
	 * if managed cache is enabled, a bypass jobqueue is needed:
	 * no device reads are needed for the pclusters in this queue.
*/
q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-#endif
q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
}
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
z_erofs_next_pcluster_t qtail[],
z_erofs_next_pcluster_t owned_head)
kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
return true;
}
-#else
-static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
- z_erofs_next_pcluster_t qtail[],
- z_erofs_next_pcluster_t owned_head)
-{
- /* impossible to bypass submission for managed cache disabled */
- DBG_BUGON(1);
-}
-
-static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
- unsigned int nr_bios,
- bool force_fg)
-{
- /* bios should be >0 if managed cache is disabled */
- DBG_BUGON(!nr_bios);
- return false;
-}
-#endif
static bool z_erofs_vle_submit_all(struct super_block *sb,
z_erofs_next_pcluster_t owned_head,
pagepool, io, force_fg))
return;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
	/* decompress no-I/O pclusters immediately */
z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
-#endif
+
if (!force_fg)
return;
struct super_block *sb;
};
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
struct page *page)
{
return page->mapping == MNGD_MAPPING(sbi);
}
-#else
-#define MNGD_MAPPING(sbi) (NULL)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
- struct page *page) { return false; }
-#endif /* !EROFS_FS_HAS_MANAGED_CACHE */
#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)