1. export hash lock, alock and ocf_lru_* helpers from OCF.
2. add weak attribute to ocf_engine_push_req_front and
ocf_engine_push_req_back so they can be overridden by the xcache
queue handler (a sketch of the mechanism follows below).
3. add .flush_do_asynch_common to support flushing metadata from
xcache.
4. override IO, cleaner and metadata handling in OCF with xcache.
5. add xcache qos and evicting.
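
A minimal sketch (illustrative only, not part of this patch) of the
weak-symbol mechanism in (2): OCF keeps its default definition marked
weak, and a strong definition linked in from xcache replaces it at link
time, routing requests to the xcache queue handler. The xcache-side
helper name below is assumed for illustration:

    /* OCF default, now overridable (this patch): */
    void __attribute__((weak)) ocf_engine_push_req_front(
                    struct ocf_request *req, bool allow_sync)
    {
            /* default: push req onto its OCF queue and kick the queue */
    }

    /* xcache strong definition overrides the weak symbol when linked: */
    void ocf_engine_push_req_front(struct ocf_request *req,
                    bool allow_sync)
    {
            xcache_queue_push_req(req, allow_sync); /* assumed helper */
    }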
Signed-off-by: Kemeng Shi <shikemeng(a)huaweicloud.com>
---
inc/ocf.h | 1 +
inc/ocf_def.h | 1 +
inc/xcache.h | 7 +
inc/xcache_cleaner.h | 6 +
inc/xcache_io.h | 90 ++++
src/cleaning/cleaning.c | 46 +-
src/cleaning/cleaning.h | 3 +
src/concurrency/ocf_metadata_concurrency.c | 4 +-
src/concurrency/ocf_metadata_concurrency.h | 7 +
src/engine/engine_common.c | 4 +-
src/engine/xcache_engine.c | 28 +
src/engine/xcache_engine.h | 8 +
src/engine/xcache_engine_common.c | 464 +++++++++++++++++
src/engine/xcache_engine_common.h | 216 ++++++++
src/engine/xcache_engine_flush.c | 140 +++++
src/engine/xcache_engine_flush.h | 25 +
src/engine/xcache_engine_rd.c | 366 +++++++++++++
src/engine/xcache_engine_rd.h | 6 +
src/engine/xcache_engine_wb.c | 211 ++++++++
src/engine/xcache_engine_wb.h | 6 +
src/engine/xcache_engine_wt.c | 210 ++++++++
src/engine/xcache_engine_wt.h | 6 +
src/evicting/deadline.c | 172 +++++++
src/evicting/deadline.h | 6 +
src/evicting/evicting.c | 3 +
src/evicting/evicting_helper.h | 32 ++
src/evicting/evicting_ops.h | 61 +++
src/metadata/metadata_raw.c | 87 +++-
src/metadata/metadata_raw.h | 40 ++
src/metadata/xcache_metadata.c | 88 ++++
src/metadata/xcache_metadata.h | 47 ++
src/mngt/ocf_mngt_cache.c | 11 +
src/ocf_cache_priv.h | 1 +
src/ocf_lru.c | 6 +-
src/ocf_lru.h | 7 +
src/ocf_queue.c | 11 +-
src/ocf_queue_priv.h | 2 +
src/qos/qos.c | 6 +
src/qos/qos.h | 27 +
src/qos/qos_lb.h | 143 ++++++
src/utils/utils_alock.c | 122 +++++
src/utils/utils_alock.h | 11 +
src/utils/utils_cache_line.c | 6 +-
src/xcache.c | 30 ++
src/xcache.h | 58 +++
src/xcache_cleaner.c | 572 +++++++++++++++++++++
src/xcache_cleaner.h | 69 +++
src/xcache_lru.c | 199 +++++++
src/xcache_lru.h | 9 +
src/xcache_ocf_core.c | 45 ++
src/xcache_queue.c | 337 ++++++++++++
src/xcache_queue.h | 35 ++
52 files changed, 4042 insertions(+), 56 deletions(-)
create mode 100644 inc/xcache.h
create mode 100644 inc/xcache_cleaner.h
create mode 100644 inc/xcache_io.h
create mode 100644 src/engine/xcache_engine.c
create mode 100644 src/engine/xcache_engine.h
create mode 100644 src/engine/xcache_engine_common.c
create mode 100644 src/engine/xcache_engine_common.h
create mode 100644 src/engine/xcache_engine_flush.c
create mode 100644 src/engine/xcache_engine_flush.h
create mode 100644 src/engine/xcache_engine_rd.c
create mode 100644 src/engine/xcache_engine_rd.h
create mode 100644 src/engine/xcache_engine_wb.c
create mode 100644 src/engine/xcache_engine_wb.h
create mode 100644 src/engine/xcache_engine_wt.c
create mode 100644 src/engine/xcache_engine_wt.h
create mode 100644 src/evicting/deadline.c
create mode 100644 src/evicting/deadline.h
create mode 100644 src/evicting/evicting.c
create mode 100644 src/evicting/evicting_helper.h
create mode 100644 src/evicting/evicting_ops.h
create mode 100644 src/metadata/xcache_metadata.c
create mode 100644 src/metadata/xcache_metadata.h
create mode 100644 src/qos/qos.c
create mode 100644 src/qos/qos.h
create mode 100644 src/qos/qos_lb.h
create mode 100644 src/xcache.c
create mode 100644 src/xcache.h
create mode 100644 src/xcache_cleaner.c
create mode 100644 src/xcache_cleaner.h
create mode 100644 src/xcache_lru.c
create mode 100644 src/xcache_lru.h
create mode 100644 src/xcache_ocf_core.c
create mode 100644 src/xcache_queue.c
create mode 100644 src/xcache_queue.h
diff --git a/inc/ocf.h b/inc/ocf.h
index 416d743..31137fc 100644
--- a/inc/ocf.h
+++ b/inc/ocf.h
@@ -32,5 +32,6 @@
#include "ocf_ctx.h"
#include "ocf_err.h"
#include "ocf_trace.h"
+#include "xcache.h"
#endif /* __OCF_H__ */
diff --git a/inc/ocf_def.h b/inc/ocf_def.h
index 89fb2e0..3466c3f 100644
--- a/inc/ocf_def.h
+++ b/inc/ocf_def.h
@@ -332,6 +332,7 @@ typedef enum {
*/
#define OCF_READ 0
#define OCF_WRITE 1
+#define OCF_FLUSH 2
/**
* @}
*/
diff --git a/inc/xcache.h b/inc/xcache.h
new file mode 100644
index 0000000..1080ba8
--- /dev/null
+++ b/inc/xcache.h
@@ -0,0 +1,7 @@
+#ifndef XCACHE_H__
+#define XCACHE_H__
+
+#include "xcache_io.h"
+#include "xcache_cleaner.h"
+
+#endif
diff --git a/inc/xcache_cleaner.h b/inc/xcache_cleaner.h
new file mode 100644
index 0000000..88c2760
--- /dev/null
+++ b/inc/xcache_cleaner.h
@@ -0,0 +1,6 @@
+#ifndef XCACHE_CLEANER_H__
+#define XCACHE_CLEANER_H__
+
+void xcache_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue);
+
+#endif
diff --git a/inc/xcache_io.h b/inc/xcache_io.h
new file mode 100644
index 0000000..5633a6c
--- /dev/null
+++ b/inc/xcache_io.h
@@ -0,0 +1,90 @@
+#ifndef XCACHE_IO_H__
+#define XCACHE_IO_H__
+
+#include "ocf_env.h"
+#include "ocf/ocf.h"
+
+enum entry_type {
+ XCACHE_IO_ENTRY,
+ XCACHE_BACKDEV_IO_ENTRY,
+ OCF_REQ_ENTRY,
+};
+
+struct queue_entry {
+ enum entry_type type;
+ struct list_head list;
+};
+
+#define INLINE_FLUSH_LINES 4
+struct xcache_io;
+typedef void (*xcache_io_end_fn)(struct xcache_io *io, int error);
+typedef int (*xcache_io_if)(struct xcache_io *io);
+struct xcache_io {
+ // queue_entry
+ enum entry_type type;
+ struct list_head queue_list;
+
+ xcache_io_end_fn end;
+ xcache_io_if io_if;
+ env_atomic remaining;
+ ctx_data_t *data;
+ int error;
+
+ ocf_queue_t io_queue;
+ ocf_cache_t cache;
+ ocf_core_t core;
+ uint64_t start_addr;
+ uint64_t size;
+ uint8_t rw;
+ uint8_t flags;
+
+ ocf_cache_line_t flush_lines[INLINE_FLUSH_LINES];
+ uint64_t flush_line_num;
+};
+
+struct backdev_io_end_arg {
+ uint64_t addr;
+ uint64_t size;
+ int error;
+};
+struct xcache_backdev_io;
+typedef int (*backdev_io_end_fn)(struct xcache_backdev_io *io, struct backdev_io_end_arg *arg);
+typedef void (*backdev_io_res_fn)(struct xcache_backdev_io *io);
+enum xcache_dir {
+ XCACHE_RD = 0,
+ XCACHE_WR,
+ XCACHE_FLUSH,
+};
+struct xcache_backdev_io {
+ // queue_entry
+ enum entry_type type;
+ struct list_head free_list;
+
+ struct xcache_io *xcache_io;
+ backdev_io_end_fn end;
+
+ ocf_cache_line_t line;
+ ctx_data_t *data;
+
+ union {
+ // for alock
+ struct {
+ backdev_io_res_fn io_res;
+ uint64_t addr;
+ uint64_t size;
+ };
+ // for io_end
+ struct {
+ int dev;
+ enum xcache_dir dir;
+ };
+ };
+
+ void *priv;
+};
+
+void xcache_backdev_io_end(struct xcache_backdev_io *bd_io, struct backdev_io_end_arg *arg);
+void xcache_submit_io(struct xcache_io *io);
+void spdk_backdev_submit_io(struct xcache_backdev_io *io_base, bool to_cache, uint64_t addr, uint64_t len, uint64_t offset, uint8_t dir);
+
+#endif
diff --git a/src/cleaning/cleaning.c b/src/cleaning/cleaning.c
index 94421f0..c7eef8f 100644
--- a/src/cleaning/cleaning.c
+++ b/src/cleaning/cleaning.c
@@ -70,47 +70,57 @@ static int _ocf_cleaner_run_check_dirty_inactive(ocf_cache_t cache)
return 1;
}
-static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue)
{
ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
- ocf_mngt_cache_unlock(cache);
- ocf_queue_put(cleaner->io_queue);
- cleaner->end(cleaner, interval);
-}
-
-void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
-{
- ocf_cache_t cache;
-
- OCF_CHECK_NULL(cleaner);
- OCF_CHECK_NULL(queue);
-
- cache = ocf_cleaner_get_cache(cleaner);
-
/* Do not involve cleaning when cache is not running
* (error, etc.).
*/
if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
ocf_mngt_cache_is_locked(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
/* Sleep in case there is management operation in progress. */
if (ocf_mngt_cache_trylock(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
ocf_mngt_cache_unlock(cache);
cleaner->end(cleaner, SLEEP_TIME_MS);
- return;
+ return -1;
}
ocf_queue_get(queue);
cleaner->io_queue = queue;
+ return 0;
+}
+
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
+{
+ ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
+
+ ocf_mngt_cache_unlock(cache);
+ ocf_queue_put(cleaner->io_queue);
+ cleaner->end(cleaner, interval);
+}
+
+void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
+{
+ ocf_cache_t cache;
+
+ OCF_CHECK_NULL(cleaner);
+ OCF_CHECK_NULL(queue);
+
+ if (ocf_cleaner_run_prepare(cleaner, queue) != 0) {
+ return;
+ }
+
+ cache = ocf_cleaner_get_cache(cleaner);
ocf_cleaning_perform_cleaning(cache, ocf_cleaner_run_complete);
}
diff --git a/src/cleaning/cleaning.h b/src/cleaning/cleaning.h
index 007dac0..f514393 100644
--- a/src/cleaning/cleaning.h
+++ b/src/cleaning/cleaning.h
@@ -53,4 +53,7 @@ void ocf_kick_cleaner(ocf_cache_t cache);
void ocf_stop_cleaner(ocf_cache_t cache);
+int ocf_cleaner_run_prepare(ocf_cleaner_t cleaner, ocf_queue_t queue);
+void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval);
+
#endif
diff --git a/src/concurrency/ocf_metadata_concurrency.c b/src/concurrency/ocf_metadata_concurrency.c
index 52059a0..794e27f 100644
--- a/src/concurrency/ocf_metadata_concurrency.c
+++ b/src/concurrency/ocf_metadata_concurrency.c
@@ -212,7 +212,7 @@ void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock,
number. Preffered way to lock multiple hash buckets is to use
request lock rountines ocf_req_hash_(un)lock_(rd/wr).
*/
-static inline void ocf_hb_id_naked_lock(
+void ocf_hb_id_naked_lock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
@@ -226,7 +226,7 @@ static inline void ocf_hb_id_naked_lock(
ENV_BUG();
}
-static inline void ocf_hb_id_naked_unlock(
+void ocf_hb_id_naked_unlock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
diff --git a/src/concurrency/ocf_metadata_concurrency.h b/src/concurrency/ocf_metadata_concurrency.h
index 97262d8..43f4d1d 100644
--- a/src/concurrency/ocf_metadata_concurrency.h
+++ b/src/concurrency/ocf_metadata_concurrency.h
@@ -177,4 +177,11 @@ void ocf_collision_start_exclusive_access(struct ocf_metadata_lock *metadata_loc
uint32_t page);
void ocf_collision_end_exclusive_access(struct ocf_metadata_lock *metadata_lock,
uint32_t page);
+
+void ocf_hb_id_naked_lock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
+void ocf_hb_id_naked_unlock(
+ struct ocf_metadata_lock *metadata_lock,
+ ocf_cache_line_t hash, int rw);
#endif
diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c
index a789b13..5b30250 100644
--- a/src/engine/engine_common.c
+++ b/src/engine/engine_common.c
@@ -584,7 +584,7 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
req->info.hit_no, req->core_line_count);
}
-void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
@@ -614,7 +614,7 @@ void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
ocf_queue_kick(q, allow_sync);
}
-void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
+void __attribute__((weak)) ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
diff --git a/src/engine/xcache_engine.c b/src/engine/xcache_engine.c
new file mode 100644
index 0000000..089afa4
--- /dev/null
+++ b/src/engine/xcache_engine.c
@@ -0,0 +1,28 @@
+#include "ocf/ocf.h"
+
+#include "xcache_engine.h"
+#include "xcache_engine_common.h"
+#include "xcache_engine_rd.h"
+#include "xcache_engine_wt.h"
+#include "xcache_engine_wb.h"
+#include "xcache_engine_flush.h"
+
+void xcache_get_io_if(struct xcache_io *io, ocf_cache_mode_t mode)
+{
+ if (io->rw == OCF_WRITE) {
+ switch (mode) {
+ case ocf_cache_mode_wb:
+ io->io_if = xcache_wb;
+ return;
+ case ocf_cache_mode_wt:
+ io->io_if = xcache_wt;
+ return;
+ default:
+ return;
+ }
+ } else if (io->rw == OCF_READ) {
+ io->io_if = xcache_read_generic;
+ } else {
+ io->io_if = xcache_flush;
+ }
+}
diff --git a/src/engine/xcache_engine.h b/src/engine/xcache_engine.h
new file mode 100644
index 0000000..f20444a
--- /dev/null
+++ b/src/engine/xcache_engine.h
@@ -0,0 +1,8 @@
+#ifndef __XCACHE_ENGINE_H_
+#define __XCACHE_ENGINE_H_
+
+#include "ocf/xcache.h"
+
+void xcache_get_io_if(struct xcache_io *io, ocf_cache_mode_t mode);
+
+#endif
diff --git a/src/engine/xcache_engine_common.c b/src/engine/xcache_engine_common.c
new file mode 100644
index 0000000..687c54c
--- /dev/null
+++ b/src/engine/xcache_engine_common.c
@@ -0,0 +1,464 @@
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../utils/utils_cache_line.h"
+#include "../metadata/metadata.h"
+
+#include "../xcache.h"
+#include "xcache_engine_common.h"
+#include "../xcache_lru.h"
+#include "../xcache_queue.h"
+#include "../evicting/evicting_ops.h"
+
+uint64_t cache_line_to_addr(ocf_cache_t cache, ocf_cache_line_t line, uint64_t line_offset)
+{
+ uint64_t addr;
+
+ addr = ocf_metadata_map_lg2phy(cache, line);
+ addr = xcache_line_to_addr(cache, addr);
+ addr += cache->device->metadata_offset;
+ addr += line_offset;
+ return addr;
+}
+
+ocf_cache_line_t addr_to_cache_line(ocf_cache_t cache, uint64_t addr)
+{
+ ocf_cache_line_t line;
+
+ addr -= cache->device->metadata_offset;
+ line = xcache_addr_to_line(cache, addr);
+ line = ocf_metadata_map_phy2lg(cache, line);
+ return line;
+}
+
+ocf_cache_line_t xcache_engine_lookup_line(struct ocf_cache *cache,
+ ocf_core_id_t core_id, uint64_t core_line)
+{
+ ocf_cache_line_t line;
+ ocf_cache_line_t hash;
+
+ hash = ocf_metadata_hash_func(cache, core_line, core_id);
+ line = ocf_metadata_get_hash(cache, hash);
+
+ while (line != cache->device->collision_table_entries) {
+ ocf_core_id_t curr_core_id;
+ uint64_t curr_core_line;
+
+ ocf_metadata_get_core_info(cache, line, &curr_core_id,
+ &curr_core_line);
+
+ if (core_id == curr_core_id && curr_core_line == core_line) {
+ return line;
+ }
+
+ line = ocf_metadata_get_collision_next(cache, line);
+ }
+
+ return INVALID_LINE;
+}
+
+void xcache_map_cache_line(struct xcache_io_context *ctx,
+ ocf_cache_line_t cache_line)
+{
+ ocf_cache_t cache = xcache_ctx_cache(ctx);
+ ocf_core_id_t core_id = ocf_core_get_id(xcache_ctx_core(ctx));
+ unsigned int hash_index = ctx->hash;
+ uint64_t core_line = ctx->core_line;
+
+ /* Add the block to the corresponding collision list */
+ ocf_metadata_start_collision_shared_access(cache, cache_line);
+ ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
+ cache_line);
+ ocf_metadata_end_collision_shared_access(cache, cache_line);
+}
+
+static void xcache_engine_prepare_wait(void *io)
+{
+ struct xcache_backdev_io *base_io = (struct xcache_backdev_io *)io;
+ struct xcache_io *ocf_io = base_io->xcache_io;
+
+ // take a reference so the xcache_io cannot complete while it waits for the lock
+ xcache_io_get(ocf_io);
+}
+
+static void xcache_engine_prepare_wake(void *io)
+{
+ struct xcache_backdev_io *base_io = (struct xcache_backdev_io *)io;
+
+ xcache_queue_push_backdev_io_front(base_io, false);
+}
+
+static void xcache_get_core_range(struct xcache_io_context *ctx,
+ uint64_t start_core_line, uint64_t end_core_line,
+ uint64_t *addr, uint64_t *size)
+{
+ struct xcache_io *io = ctx->io;
+ ocf_cache_t cache = xcache_ctx_cache(ctx);
+ uint64_t start_addr, end_addr, end;
+
+ start_addr = xcache_io_start_addr(io);
+ end_addr = start_addr + xcache_io_size(io);
+
+ *addr = xcache_line_to_addr(cache, start_core_line);
+ end = xcache_line_to_addr(cache, end_core_line);
+
+ if (*addr < start_addr) {
+ *addr = start_addr;
+ }
+ if (end > end_addr) {
+ end = end_addr;
+ }
+
+ *size = end - *addr;
+}
+
+static inline int xcache_engine_lock_wr(struct xcache_io_context *ctx, struct ocf_alock *alock,
+ uint64_t core_line, ocf_cache_line_t line,
+ backdev_io_res_fn io_res)
+{
+ ocf_queue_t q = xcache_ctx_queue(ctx);
+ struct xcache_backdev_io *base_io = xcache_queue_alloc_backdev_io(q);
+ int ret;
+
+ if (base_io == NULL) {
+ ocf_cache_log(xcache_ctx_cache(ctx), log_err, "alloc base io failed\n");
+ return -ENOMEM;
+ }
+
+ base_io->xcache_io = ctx->io;
+ base_io->line = line;
+ base_io->io_res = io_res;
+ base_io->data = ctx->io->data;
+ xcache_get_core_range(ctx, core_line, core_line + 1, &base_io->addr, &base_io->size);
+
+ ret = xcache_lock_wr(alock, line, xcache_engine_prepare_wait,
+ xcache_engine_prepare_wake, base_io);
+ if (ret != OCF_LOCK_NOT_ACQUIRED) {
+ xcache_queue_free_backdev_io(q, base_io);
+ }
+
+ return ret;
+}
+
+static inline void xcache_hash_lock_rd(ocf_cache_t cache, ocf_cache_line_t hash, uint8_t lock_idx)
+{
+ ocf_metadata_start_shared_access(&cache->metadata.lock,
+ lock_idx);
+ ocf_hb_id_naked_lock(&cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+}
+
+static inline void xcache_hash_unlock_rd(ocf_cache_t cache, ocf_cache_line_t hash, uint8_t lock_idx)
+{
+ ocf_hb_id_naked_unlock(&cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+ ocf_metadata_end_shared_access(&cache->metadata.lock,
+ lock_idx);
+}
+
+static inline void xcache_hash_lock_upgrade(ocf_cache_t cache, ocf_cache_line_t hash)
+{
+ ocf_hb_id_naked_unlock(&cache->metadata.lock, hash,
+ OCF_METADATA_RD);
+ ocf_hb_id_naked_lock(&cache->metadata.lock, hash,
+ OCF_METADATA_WR);
+}
+
+static inline void xcache_hash_unlock_wr(ocf_cache_t cache, ocf_cache_line_t hash, uint8_t lock_idx)
+{
+ ocf_hb_id_naked_unlock(&cache->metadata.lock, hash,
+ OCF_METADATA_WR);
+ ocf_metadata_end_shared_access(&cache->metadata.lock,
+ lock_idx);
+}
+
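+/*
+ * Look up the line under the hash read lock first; on miss, upgrade to
+ * the hash write lock, re-check, and only then allocate a fresh line.
+ */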
+int xcache_engine_get_line(struct xcache_io_context *ctx,
+ uint64_t core_line, ocf_cache_line_t *line,
+ backdev_io_res_fn io_res)
+{
+ ocf_cache_t cache = xcache_ctx_cache(ctx);
+ ocf_core_t core = xcache_ctx_core(ctx);
+ ocf_queue_t q = xcache_ctx_queue(ctx);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+ unsigned lock_idx = ocf_metadata_concurrency_next_idx(q);
+ ocf_cache_line_t hash = ocf_metadata_hash_func(cache, core_line, core_id);
+ struct ocf_alock *alock = ocf_cache_line_concurrency(cache);
+
+ int lock;
+
+ xcache_hash_lock_rd(cache, hash, lock_idx);
+ *line = xcache_engine_lookup_line(cache, core_id, core_line);
+ if (*line != INVALID_LINE) {
+ lock = xcache_engine_lock_wr(ctx, alock, core_line, *line, io_res);
+ xcache_hash_unlock_rd(cache, hash, lock_idx);
+ goto found;
+ }
+
+ xcache_hash_lock_upgrade(cache, hash);
+ *line = xcache_engine_lookup_line(cache, core_id, core_line);
+ if (unlikely(*line != INVALID_LINE)) {
+ lock = xcache_engine_lock_wr(ctx, alock, core_line, *line, io_res);
+ xcache_hash_unlock_wr(cache, hash, lock_idx);
+ goto found;
+ }
+
+ ctx->core_line = core_line;
+ ctx->hash = hash;
+ *line = xcache_get_cline(ctx);
+ lock = OCF_LOCK_ACQUIRED;
+ xcache_hash_unlock_wr(cache, hash, lock_idx);
+ if (*line == INVALID_LINE) {
+ xcache_clean(ctx, 128);
+ return lock;
+ }
+
+found:
+ evicting_line_accessed(xcache_get_ctx(cache), *line);
+ return lock;
+}
+
+void xcache_init_io_ctx(struct xcache_io_context *ctx, struct xcache_io *io)
+{
+ uint64_t addr = xcache_io_start_addr(io);
+ uint64_t size = xcache_io_size(io);
+
+ ctx->io = io;
+ ctx->part_id = PARTITION_DEFAULT;
+ ctx->offset = 0;
+ ctx->start_miss_line = INVALID_CORE_LINE;
+ ctx->hit_no = 0;
+ ctx->cache_bytes = 0;
+ ctx->core_bytes = 0;
+ xcache_io_get_line_range(xcache_io_cache(io), addr, size,
+ &ctx->core_line_first, &ctx->core_line_last);
+}
+
+void xcache_update_stat(struct xcache_io_context *ctx)
+{
+ struct xcache_io *io = ctx->io;
+ uint64_t len = xcache_io_size(io);
+
+ ocf_core_stats_request_update(xcache_ctx_core(ctx), PARTITION_DEFAULT,
+ io->rw, ctx->hit_no, ctx->core_line_last - ctx->core_line_first + 1);
+ ocf_core_stats_vol_block_update(xcache_ctx_core(ctx), PARTITION_DEFAULT,
+ io->rw, len);
+ ocf_core_stats_core_block_update(xcache_ctx_core(ctx), PARTITION_DEFAULT,
+ io->rw, ctx->core_bytes);
+ ocf_core_stats_cache_block_update(xcache_ctx_core(ctx), PARTITION_DEFAULT,
+ io->rw, ctx->cache_bytes);
+}
+
+static void xcache_queue_miss_line(struct xcache_io_context *ctx, uint64_t core_line)
+{
+ if (ctx->start_miss_line == INVALID_CORE_LINE) {
+ ctx->start_miss_line = core_line;
+ }
+}
+
+static int xcache_submit_miss_line(struct xcache_io_context *ctx, uint64_t end_miss_line, xcache_line_miss_fn submit_func)
+{
+ struct xcache_io *io = ctx->io;
+
+ struct xcache_backdev_io *base_io;
+ uint64_t start_addr, end_addr;
+ uint64_t addr, end;
+
+ if (submit_func == NULL) {
+ ctx->start_miss_line = INVALID_CORE_LINE;
+ return 0;
+ }
+
+ if (ctx->start_miss_line == INVALID_CORE_LINE) {
+ return 0;
+ }
+
+ base_io = xcache_queue_alloc_backdev_io(xcache_ctx_queue(ctx));
+ if (base_io == NULL) {
+ ocf_cache_log(xcache_ctx_cache(ctx), log_err, "alloc base io failed\n");
+ io->error = -ENOMEM;
+ return -ENOMEM;
+ }
+ base_io->xcache_io = io;
+ base_io->data = io->data;
+
+ start_addr = xcache_io_start_addr(io);
+ end_addr = start_addr + xcache_io_size(io);
+ addr = xcache_line_to_addr(xcache_ctx_cache(ctx), ctx->start_miss_line);
+ end = xcache_line_to_addr(xcache_ctx_cache(ctx), end_miss_line);
+ if (addr < start_addr) {
+ addr = start_addr;
+ }
+ if (end > end_addr) {
+ end = end_addr;
+ }
+
+ submit_func(base_io, addr, end - addr, addr - start_addr);
+
+ ctx->start_miss_line = INVALID_CORE_LINE;
+ return 0;
+}
+
+int xcache_foreach_line(struct xcache_io_context *ctx, xcache_line_handle_func func, void *priv)
+{
+ int ret = 0;
+ struct xcache_io *io = ctx->io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t len = xcache_io_size(io);
+
+ uint64_t addr, line_end, line_size, size;
+ uint8_t start_sector, end_sector, line_sectors;
+ uint64_t core_line_first, core_line_last, core_line_cur;
+
+ core_line_first = ctx->core_line_first;
+ core_line_last = ctx->core_line_last;
+ addr = start_addr;
+ line_size = ocf_line_size(xcache_ctx_cache(ctx));
+ line_sectors = BYTES_TO_SECTORS(line_size);
+ line_end = xcache_line_to_addr(xcache_ctx_cache(ctx), core_line_first + 1);
+ start_sector = xcache_sector_offset(xcache_ctx_cache(ctx), BYTES_TO_SECTORS(addr));
+
+ for (core_line_cur = core_line_first;
+ core_line_cur <= core_line_last;
+ core_line_cur++) {
+ if (core_line_cur == core_line_last) {
+ end_sector = xcache_sector_offset(xcache_ctx_cache(ctx), BYTES_TO_SECTORS(start_addr + len - 1));
+ size = start_addr + len - addr;
+ } else {
+ end_sector = line_sectors - 1;
+ size = line_end - addr;
+ }
+
+ ret = func(priv, core_line_cur,
+ addr, size, start_sector, end_sector);
+ if (ret) {
+ io->error = ret;
+ break;
+ }
+ ctx->offset += size;
+ addr += size;
+ line_end += line_size;
+ start_sector = 0;
+ }
+
+ return ret;
+}
+
+static int xcache_wr_lb_common_end(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_cache_line_t line = backdev_io->line;
+ uint8_t start_sector, last_sector;
+
+ xcache_get_sectors_range(cache, cb_arg->addr, cb_arg->size, &start_sector, &last_sector);
+ ocf_metadata_start_collision_shared_access(cache, line);
+ metadata_clear_valid_sec(cache, line, start_sector, last_sector);
+ ocf_metadata_end_collision_shared_access(cache, line);
+
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->line = line;
+ backdev_io->end = xcache_wr_lb_common_end;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_WRITE);
+}
+
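+/* lb: load-balance a cache hit to the core device when qos reports the
+ * preferred device is overloaded for this direction. */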
+static inline bool xcache_engine_need_lb(struct xcache_io_handler *handler, struct xcache_line_range *line_range)
+{
+ struct xcache_io_context *ctx = handler->ctx;
+ struct xcache_io *io = ctx->io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ xcache_context_t *xcache_ctx = xcache_get_ctx(cache);
+
+ if (handler->lb_fn == NULL) {
+ return false;
+ }
+
+ if (handler->need_lb_fn != NULL &&
+ !handler->need_lb_fn(xcache_io_cache(io), line_range)) {
+ return false;
+ }
+
+ return xcache_qos_need_lb(&xcache_ctx->qos, xcache_io_dir(io), line_range->size);
+}
+
+static int xcache_handle_line(void *priv,
+ uint64_t core_line, uint64_t addr, uint64_t size,
+ uint8_t start_sector, uint8_t last_sector)
+{
+ struct xcache_io_handler *handler = (struct xcache_io_handler *)priv;
+ struct xcache_io_context *ctx = handler->ctx;
+ struct xcache_io *io = ctx->io;
+ ocf_cache_t cache = xcache_ctx_cache(ctx);
+ struct xcache_backdev_io *backdev_io;
+ struct xcache_line_range line_range = {
+ .addr = addr,
+ .size = size,
+ .start_sector = start_sector,
+ .last_sector = last_sector,
+ };
+
+ ocf_cache_line_t line;
+ int lock;
+
+ lock = xcache_engine_get_line(ctx, core_line, &line, handler->res_fn);
+ if (lock < 0) {
+ ocf_cache_log(cache, log_err, "try to wait for lock failed\n");
+ return -ENOMEM;
+ }
+
+ if (line == INVALID_LINE || (lock == OCF_LOCK_ACQUIRED &&
+ handler->valid_fn != NULL && handler->valid_fn(cache, line, start_sector, last_sector))) {
+ xcache_queue_miss_line(ctx, core_line);
+ ctx->core_bytes += size;
+ return 0;
+ }
+
+ xcache_submit_miss_line(ctx, core_line, handler->miss_fn);
+ if (lock == OCF_LOCK_NOT_ACQUIRED) {
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
+ return 0;
+ }
+
+ backdev_io = xcache_alloc_backdev_io(io);
+ if (backdev_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc base io failed\n");
+ return -ENOMEM;
+ }
+
+ line_range.cache_line = line;
+ if (xcache_engine_need_lb(handler, &line_range))
+ {
+ ctx->core_bytes += size;
+ handler->lb_fn(backdev_io, line, addr, size);
+ return 0;
+ }
+
+ ctx->hit_no++;
+ ctx->cache_bytes += size;
+ handler->hit_fn(backdev_io, line, SECTORS_TO_BYTES(start_sector), size, ctx->offset);
+ return 0;
+}
+
+int xcache_handle_io(struct xcache_io_handler *handler)
+{
+ struct xcache_io_context *ctx = handler->ctx;
+ int ret;
+
+ ret = xcache_foreach_line(ctx, xcache_handle_line, (void *)handler);
+ if (ret != 0) {
+ return ret;
+ }
+ return xcache_submit_miss_line(ctx, ctx->core_line_last + 1, handler->miss_fn);
+}
diff --git a/src/engine/xcache_engine_common.h b/src/engine/xcache_engine_common.h
new file mode 100644
index 0000000..fd73519
--- /dev/null
+++ b/src/engine/xcache_engine_common.h
@@ -0,0 +1,216 @@
+#ifndef XCACHE_ENGINE_COMMON_H_
+#define XCACHE_ENGINE_COMMON_H_
+
+#include "../ocf_cache_priv.h"
+#include "../utils/utils_cache_line.h"
+#include "../ocf_def_priv.h"
+
+#include "../xcache.h"
+#include "../xcache_queue.h"
+
+#define INVALID_CORE_LINE ((uint64_t)-1)
+#define INVALID_LINE ((ocf_cache_line_t)-1)
+
+struct xcache_io_context {
+ struct xcache_io *io;
+ ocf_queue_t queue;
+ ocf_part_id_t part_id;
+ uint64_t offset;
+ uint64_t core_line_first;
+ uint64_t core_line_last;
+ uint64_t core_line;
+ ocf_cache_line_t hash;
+
+ uint64_t start_miss_line;
+
+ uint64_t hit_no;
+ uint64_t cache_bytes;
+ uint64_t core_bytes;
+};
+
+static inline uint8_t xcache_addr_offset(ocf_cache_t cache, uint64_t addr)
+{
+ return addr & (ocf_line_size(cache) - 1);
+}
+
+static inline uint8_t xcache_sector_offset(ocf_cache_t cache, uint64_t sector)
+{
+ return sector & (ocf_line_sectors(cache) - 1);
+}
+
+static inline void xcache_get_sectors_range(ocf_cache_t cache, uint64_t addr, uint64_t size, uint8_t *start_sector, uint8_t *last_sector)
+{
+ uint64_t offset = xcache_addr_offset(cache, addr);
+ *start_sector = BYTES_TO_SECTORS(offset);
+ *last_sector = BYTES_TO_SECTORS(offset + size - 1);
+}
+
+static inline void xcache_io_get_line_range(ocf_cache_t cache, uint64_t addr, uint64_t size,
+ uint64_t *line_first, uint64_t *line_last)
+{
+ *line_first = xcache_addr_to_line(cache, addr);
+ *line_last = xcache_addr_to_line(cache, addr + size - 1);
+}
+
+ocf_cache_line_t xcache_engine_lookup_line(struct ocf_cache *cache,
+ ocf_core_id_t core_id, uint64_t core_line);
+int xcache_engine_get_line(struct xcache_io_context *ctx,
+ uint64_t core_line, ocf_cache_line_t *line,
+ backdev_io_res_fn io_res);
+
+void xcache_map_cache_line(struct xcache_io_context *ctx,
+ ocf_cache_line_t cache_line);
+
+uint64_t cache_line_to_addr(ocf_cache_t cache, ocf_cache_line_t line, uint64_t line_offset);
+ocf_cache_line_t addr_to_cache_line(ocf_cache_t cache, uint64_t addr);
+
+struct xcache_line_range {
+ ocf_cache_line_t cache_line;
+ uint64_t core_line;
+ uint64_t addr;
+ uint64_t size;
+ uint64_t start_sector;
+ uint64_t last_sector;
+};
+
+typedef int (*xcache_line_handle_func)(void *priv,
+ uint64_t core_line, uint64_t addr, uint64_t size,
+ uint8_t start_sector, uint8_t last_sector);
+
+void xcache_init_io_ctx(struct xcache_io_context *ctx, struct xcache_io *io);
+int xcache_foreach_line(struct xcache_io_context *ctx, xcache_line_handle_func func, void *priv);
+void xcache_update_stat(struct xcache_io_context *ctx);
+
+static inline uint64_t xcache_io_start_addr(struct xcache_io *io)
+{
+ return io->start_addr;
+}
+
+static inline uint64_t xcache_io_size(struct xcache_io *io)
+{
+ return io->size;
+}
+
+static inline uint8_t xcache_io_flags(struct xcache_io *io)
+{
+ return io->flags;
+}
+
+static inline ocf_core_t xcache_io_core(struct xcache_io *io)
+{
+ return io->core;
+}
+
+static inline ocf_cache_t xcache_io_cache(struct xcache_io *io)
+{
+ return io->cache;
+}
+
+static inline ocf_queue_t xcache_io_queue(struct xcache_io *io)
+{
+ return io->io_queue;
+}
+
+static inline int xcache_io_dir(struct xcache_io *io)
+{
+ return io->rw;
+}
+
+static inline ocf_core_t xcache_ctx_core(struct xcache_io_context *ctx)
+{
+ return xcache_io_core(ctx->io);
+}
+
+static inline ocf_cache_t xcache_ctx_cache(struct xcache_io_context *ctx)
+{
+ return xcache_io_cache(ctx->io);
+}
+
+static inline ocf_queue_t xcache_ctx_queue(struct xcache_io_context *ctx)
+{
+ return xcache_io_queue(ctx->io);
+}
+
+static inline struct xcache_backdev_io *xcache_alloc_backdev_io(struct xcache_io *io)
+{
+ struct xcache_backdev_io *backdev_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+
+ if (backdev_io == NULL) {
+ return NULL;
+ }
+
+ backdev_io->xcache_io = io;
+ backdev_io->data = io->data;
+ return backdev_io;
+}
+
+typedef int (*xcache_line_valid_fn)(ocf_cache_t cache, ocf_cache_line_t line,
+ uint8_t start_sector, uint8_t last_sector);
+typedef int (*xcache_line_hit_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t offset, uint64_t size, uint64_t buf_offset);
+typedef int (*xcache_line_miss_fn)(struct xcache_backdev_io *base_io, uint64_t addr,
+ uint64_t size, uint64_t buf_offset);
+typedef int (*xcache_line_need_lb_fn)(ocf_cache_t cache, struct xcache_line_range *line_range);
+typedef void (*xcache_line_lb_fn)(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);
+
+struct xcache_io_handler {
+ struct xcache_io_context *ctx;
+ xcache_line_valid_fn valid_fn;
+ xcache_line_hit_fn hit_fn;
+ xcache_line_miss_fn miss_fn;
+ backdev_io_res_fn res_fn;
+ xcache_line_need_lb_fn need_lb_fn;
+ xcache_line_lb_fn lb_fn;
+};
+int xcache_handle_io(struct xcache_io_handler *handler);
+
+void xcache_wr_lb_common(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size);
+
+static inline void xcache_io_get(struct xcache_io *io)
+{
+ env_atomic_inc_return(&io->remaining);
+}
+
+static inline void xcache_io_end(struct xcache_io *io, int error)
+{
+ if (io->end)
+ io->end(io, error);
+}
+
+static inline void xcache_io_put(struct xcache_io *io)
+{
+ if (env_atomic_dec_return(&io->remaining))
+ return;
+
+ xcache_io_end(io, io->error);
+}
+
+static inline xcache_context_t *backdev_io_to_xcache_ctx(struct xcache_backdev_io *io_base)
+{
+ struct xcache_io *io = io_base->xcache_io;
+ ocf_queue_t q = io->io_queue;
+ ocf_cache_t cache = q->cache;
+ return xcache_get_ctx(cache);
+}
+
+static inline void xcache_backdev_submit_io(struct xcache_backdev_io *io_base, bool cached, uint64_t addr, uint64_t size, uint64_t buf_offset, uint8_t dir)
+{
+ struct xcache_io *io = io_base->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(io_base);
+
+ io_base->dir = dir;
+ if (cached) {
+ io_base->dev = CACHE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CACHE_DEV, dir, size);
+ } else {
+ io_base->dev = CORE_DEV;
+ xcache_qos_load_add(&xcache_ctx->qos, CORE_DEV, dir, size);
+ }
+
+ xcache_io_get(io);
+ spdk_backdev_submit_io(io_base, cached, addr, size, buf_offset, dir);
+}
+#endif /* XCACHE_ENGINE_COMMON_H_ */
diff --git a/src/engine/xcache_engine_flush.c b/src/engine/xcache_engine_flush.c
new file mode 100644
index 0000000..6aaf28a
--- /dev/null
+++ b/src/engine/xcache_engine_flush.c
@@ -0,0 +1,140 @@
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../ocf_queue_priv.h"
+#include "../xcache_queue.h"
+
+#include "xcache_engine_common.h"
+#include "xcache_engine_flush.h"
+
+env_atomic g_need_flush = 0;
+
+static inline int xcache_do_flush(struct xcache_io *io);
+
+static inline int xcache_push_flush_io(struct xcache_io *io)
+{
+ ocf_queue_t q = xcache_io_queue(io);
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ INIT_LIST_HEAD(&io->queue_list);
+ list_add_tail(&io->queue_list, &queue_ctx->flush_io_list);
+ env_atomic_inc(&queue_ctx->flush_io_no);
+ return env_atomic_read(&queue_ctx->flush_io_no);
+}
+
+static inline struct xcache_io *xcache_pop_flush_io(ocf_queue_t q)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+ struct xcache_io *flush_io = list_first_entry(&queue_ctx->flush_io_list,
+ struct xcache_io, queue_list);
+
+ env_atomic_dec(&queue_ctx->flush_io_no);
+ list_del(&flush_io->queue_list);
+
+ return flush_io;
+}
+
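+/*
+ * Complete every flush io queued up to (and including) this one; if
+ * further flush ios arrived meanwhile, kick off the flush again for
+ * the newest one so they are batched.
+ */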
+static void xcache_flush_finish(struct xcache_io *io)
+{
+ ocf_queue_t q = io->io_queue;
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+ struct xcache_io *finish_io;
+ struct xcache_io *flush_io;
+
+ for (finish_io = xcache_pop_flush_io(q);
+ finish_io != io;
+ finish_io = xcache_pop_flush_io(q))
+ {
+ xcache_io_put(finish_io);
+ }
+ xcache_io_put(io);
+
+ if (env_atomic_read(&queue_ctx->flush_io_no) != 0) {
+ flush_io = list_entry(queue_ctx->flush_io_list.prev, struct xcache_io, queue_list);
+ xcache_do_flush(flush_io);
+ }
+}
+
+static int xcache_flush_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+
+ if (cb_arg->error != 0) {
+ ocf_cache_log(cache, log_err, "flush with error %d\n", cb_arg->error);
+ io->error = cb_arg->error;
+ }
+ xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+
+ if (env_atomic_read(&io->remaining) == 2) {
+ xcache_flush_finish(io);
+ }
+ return 0;
+}
+
+static int xcache_flush_core(struct xcache_io *io)
+{
+ uint64_t addr = xcache_io_start_addr(io);
+ uint64_t size = xcache_io_size(io);
+ ocf_cache_t cache = xcache_io_cache(io);
+ struct xcache_backdev_io *base_io = NULL;
+
+ base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc base io failed\n");
+ return -ENOMEM;
+ }
+
+ base_io->xcache_io = io;
+ base_io->end = xcache_flush_cb;
+ xcache_backdev_submit_io(base_io, false, addr, size, 0, OCF_FLUSH);
+ return 0;
+}
+
+static int xcache_flush_cache(struct xcache_io *io)
+{
+ ocf_cache_t cache = xcache_io_cache(io);
+ struct xcache_backdev_io *base_io = NULL;
+
+ base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc base io failed\n");
+ return -ENOMEM;
+ }
+
+ base_io->xcache_io = io;
+ base_io->end = xcache_flush_cb;
+ xcache_backdev_submit_io(base_io, true, 0, 0, 0, OCF_FLUSH);
+ return 0;
+}
+
+static inline int xcache_do_flush(struct xcache_io *io)
+{
+ if (xcache_flush_core(io) != 0) {
+ io->error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ if (xcache_flush_cache(io) != 0) {
+ io->error = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int xcache_flush(struct xcache_io *io)
+{
+ io->error = 0;
+
+ if (!need_flush()) {
+ return 0;
+ }
+ clear_flush();
+
+ xcache_io_get(io);
+ if (xcache_push_flush_io(io) != 1) {
+ return 0;
+ }
+
+ return xcache_do_flush(io);
+}
diff --git a/src/engine/xcache_engine_flush.h b/src/engine/xcache_engine_flush.h
new file mode 100644
index 0000000..91738d8
--- /dev/null
+++ b/src/engine/xcache_engine_flush.h
@@ -0,0 +1,25 @@
+#ifndef XCACHE_ENGINE_FLUSH_H_
+#define XCACHE_ENGINE_FLUSH_H_
+
+#include "ocf/ocf.h"
+
+extern env_atomic g_need_flush;
+
+static inline void mark_flush(void)
+{
+ env_atomic_set(&g_need_flush, 1);
+}
+
+static inline void clear_flush(void)
+{
+ env_atomic_set(&g_need_flush, 0);
+}
+
+static inline bool need_flush(void)
+{
+ return (env_atomic_read(&g_need_flush) == 1);
+}
+
+int xcache_flush(struct xcache_io *io);
+
+#endif
diff --git a/src/engine/xcache_engine_rd.c b/src/engine/xcache_engine_rd.c
new file mode 100644
index 0000000..4b64975
--- /dev/null
+++ b/src/engine/xcache_engine_rd.c
@@ -0,0 +1,366 @@
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../concurrency/ocf_concurrency.h"
+#include "../utils/utils_cache_line.h"
+#include "../metadata/metadata.h"
+#include "../ocf_def_priv.h"
+
+#include "../xcache.h"
+#include "xcache_engine_rd.h"
+#include "xcache_engine_common.h"
+#include "../xcache_queue.h"
+
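+/* bf (backfill): after a read miss is served from the core device, the
+ * fetched data is written into the cache and the sectors marked valid. */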
+static inline void xcache_read_bf_done(ocf_cache_t cache, struct xcache_backdev_io *base_io)
+{
+ ocf_queue_t q = xcache_io_queue(base_io->xcache_io);
+
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), base_io->line);
+ xcache_queue_free_line_data(q, cache, base_io->data);
+ base_io->data = NULL;
+ xcache_queue_free_backdev_io(q, base_io);
+}
+
+static int xcache_read_bf_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *bf_io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(bf_io);
+ ocf_core_t core = xcache_io_core(bf_io);
+
+ if (cb_arg->error) {
+ ocf_cache_log(cache, log_err, "read bf failed\n");
+ ocf_core_stats_cache_error_update(core, OCF_WRITE);
+ }
+ xcache_read_bf_done(cache, base_io);
+ return 0;
+}
+
+static int xcache_read_bf_update_metadata(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size)
+{
+ ocf_core_t core = xcache_io_core(base_io->xcache_io);
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ ocf_cache_line_t line = base_io->line;
+ ocf_part_id_t part_id = PARTITION_DEFAULT;
+ uint8_t start_sector = xcache_sector_offset(cache, BYTES_TO_SECTORS(addr));
+ uint8_t end_sector = xcache_sector_offset(cache, BYTES_TO_SECTORS(addr + size - 1));
+
+ // set_cache_line_valid
+ ocf_metadata_start_collision_shared_access(cache, line);
+ if (unlikely(metadata_test_valid_sec(cache, line, start_sector, end_sector))) {
+ ocf_metadata_end_collision_shared_access(cache, line);
+ return -1;
+ }
+
+ if (metadata_set_valid_sec_changed(cache, line, start_sector, end_sector)) {
+ /*
+ * Update the number of cached data for that core object
+ */
+ env_atomic_inc(&core->runtime_meta->cached_clines);
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ }
+ ocf_metadata_end_collision_shared_access(cache, line);
+ return 0;
+}
+
+static ctx_data_t *xcache_get_bf_data(struct xcache_io *io, uint64_t addr, uint64_t size)
+{
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t start_byte = xcache_io_start_addr(io);
+ uint64_t from = addr - start_byte;
+ ctx_data_t *dst;
+
+ dst = xcache_queue_alloc_line_data(xcache_io_queue(io), cache);
+ if (dst == NULL) {
+ return NULL;
+ }
+
+ ctx_data_cpy(cache->owner, dst, io->data, 0, from, size);
+ return dst;
+}
+
+static int xcache_do_read_bf(struct xcache_backdev_io *base_io, uint64_t addr, uint64_t size)
+{
+ struct xcache_io *bf_io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(bf_io);
+ ocf_cache_line_t line = base_io->line;
+
+ int ret;
+ uint64_t cache_addr;
+
+ ret = xcache_read_bf_update_metadata(base_io, addr, size);
+ if (ret != 0) {
+ goto out;
+ }
+
+ cache_addr = cache_line_to_addr(cache, line, xcache_addr_offset(cache, addr));
+ base_io->end = xcache_read_bf_cb;
+ xcache_backdev_submit_io(base_io, true, cache_addr, size, 0, OCF_WRITE);
+ return 0;
+
+out:
+ xcache_read_bf_done(cache, base_io);
+ return ret;
+}
+
+static void xcache_read_bf_error(struct xcache_io *io, uint64_t addr, uint64_t size)
+{
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+ struct ocf_alock *alock = ocf_cache_line_concurrency(cache);
+
+ uint64_t core_line_first, core_line_last, core_line;
+ ocf_cache_line_t line;
+
+ xcache_io_get_line_range(cache, addr, size, &core_line_first, &core_line_last);
+ for (core_line = core_line_first; core_line <= core_line_last; core_line++) {
+ line = xcache_engine_lookup_line(cache, core_id, core_line);
+ if (line != INVALID_LINE) {
+ xcache_unlock_wr(alock, line);
+ }
+ }
+
+ ocf_core_stats_core_error_update(core, OCF_READ);
+}
+
+static void bf_io_end(struct xcache_io *bf_io, int error)
+{
+ xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
+}
+
+static struct xcache_io *xcache_get_bf_xcache_io(struct xcache_io *ori_io)
+{
+ struct xcache_io *bf_io = xcache_queue_alloc_xcache_io(ori_io->io_queue);
+
+ if (bf_io == NULL) {
+ ocf_cache_log(xcache_io_cache(ori_io), log_err, "alloc bf io failed\n");
+ return NULL;
+ }
+ bf_io->io_queue = ori_io->io_queue;
+ bf_io->cache = ori_io->cache;
+ bf_io->core = ori_io->core;
+ bf_io->error = 0;
+ bf_io->end = bf_io_end;
+ env_atomic_set(&bf_io->remaining, 1);
+ return bf_io;
+}
+
+static void xcache_free_bf_xcache_io(struct xcache_io *bf_io)
+{
+ xcache_queue_free_xcache_io(bf_io->io_queue, bf_io);
+}
+
+static int xcache_submit_read_bf_line(struct xcache_io *io, struct xcache_io *bf_io, uint64_t bf_addr, uint64_t bf_size, ocf_cache_line_t line)
+{
+ ocf_cache_t cache = xcache_io_cache(bf_io);
+ struct xcache_backdev_io *base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf base_io failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ return -1;
+ }
+
+ base_io->data = xcache_get_bf_data(io, bf_addr, bf_size);
+ if (base_io->data == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf_data failed\n");
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return -1;
+ }
+ base_io->xcache_io = bf_io;
+ base_io->line = line;
+
+ return xcache_do_read_bf(base_io, bf_addr, bf_size);
+}
+
+static void xcache_submit_read_bf(struct xcache_io *io, uint64_t addr, uint64_t size)
+{
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+ ocf_core_id_t core_id = ocf_core_get_id(core);
+ uint64_t line_size = ocf_line_size(cache);
+
+ uint64_t core_line_first, core_line_last, core_line;
+ ocf_cache_line_t line;
+ uint64_t bf_addr, bf_size;
+ struct xcache_io *bf_io;
+
+ bf_io = xcache_get_bf_xcache_io(io);
+ if (bf_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc bf_io failed\n");
+ xcache_read_bf_error(io, addr, size);
+ return;
+ }
+
+ xcache_io_get_line_range(cache, addr, size, &core_line_first, &core_line_last);
+ bf_addr = addr;
+ bf_size = xcache_line_to_addr(cache, core_line_first + 1) - bf_addr;
+ for (core_line = core_line_first; core_line <= core_line_last;
+ core_line++, bf_addr += bf_size, bf_size = line_size) {
+ if (core_line == core_line_last) {
+ bf_size = (addr + size) - bf_addr;
+ }
+
+ line = xcache_engine_lookup_line(cache, core_id, core_line);
+ if (line == INVALID_LINE) {
+ continue;
+ }
+
+ if (xcache_submit_read_bf_line(io, bf_io, bf_addr, bf_size, line) != 0) {
+ ocf_cache_log(cache, log_err, "read bf line failed\n");
+ continue;
+ }
+ }
+
+ xcache_io_put(bf_io);
+}
+
+static int xcache_read_bf(struct xcache_backdev_io *parent_base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = parent_base_io->xcache_io;
+
+ xcache_queue_free_backdev_io(xcache_io_queue(parent_base_io->xcache_io), parent_base_io);
+
+ if (cb_arg->error) {
+ ocf_cache_log(xcache_io_cache(io), log_err, "read_miss failed with error %d\n",
+ cb_arg->error);
+
+ xcache_read_bf_error(io, cb_arg->addr, cb_arg->size);
+ io->error = cb_arg->error;
+ return cb_arg->error;
+ }
+
+ xcache_submit_read_bf(io, cb_arg->addr, cb_arg->size);
+
+ return 0;
+}
+
+static int xcache_read_miss(struct xcache_backdev_io *base_io,
+ uint64_t addr, uint64_t size, uint64_t buf_offset)
+{
+ base_io->end = xcache_read_bf;
+
+ xcache_backdev_submit_io(base_io, false, addr, size, buf_offset, OCF_READ);
+ return 0;
+}
+
+static int xcache_read_pt_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+
+ if (cb_arg->error) {
+ ocf_cache_log(cache, log_err, "read pt failed\n");
+ ocf_core_stats_core_error_update(core, OCF_READ);
+ }
+
+ xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+ return 0;
+}
+
+static int xcache_read_hit_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+ ocf_cache_line_t line = base_io->line;
+
+ if (cb_arg->error) {
+ ocf_cache_log(cache, log_err, "read hit failed\n");
+ io->error = cb_arg->error;
+ ocf_core_stats_cache_error_update(core, OCF_READ);
+ }
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+ return 0;
+}
+
+static int xcache_read_hit(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t offset, uint64_t size, uint64_t buf_offset)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t addr = cache_line_to_addr(cache, line, offset);
+
+ base_io->end = xcache_read_hit_cb;
+ base_io->line = line;
+
+ xcache_backdev_submit_io(base_io, true, addr, size, buf_offset, OCF_READ);
+ return 0;
+}
+
+static void xcache_read_res(struct xcache_backdev_io *base_io)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t start_addr = xcache_io_start_addr(io);
+
+ uint8_t start_sector, last_sector;
+ uint64_t buf_offset = base_io->addr - start_addr;
+
+ start_sector = BYTES_TO_SECTORS(xcache_addr_offset(cache, base_io->addr));
+ last_sector = start_sector + BYTES_TO_SECTORS(base_io->size - 1);
+
+ if (metadata_test_valid_sec(cache, base_io->line, start_sector, last_sector)) {
+ xcache_read_hit(base_io, base_io->line, SECTORS_TO_BYTES(start_sector),
+ base_io->size, buf_offset);
+ } else {
+ xcache_read_miss(base_io, base_io->addr, base_io->size, buf_offset);
+ }
+}
+
+static int xcache_read_line_valid(ocf_cache_t cache, ocf_cache_line_t line,
+ uint8_t start_sector, uint8_t last_sector)
+{
+ return !metadata_test_valid_sec(cache, line, start_sector, last_sector);
+}
+
+static int xcache_read_lb_cb(struct xcache_backdev_io *backdev_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), backdev_io->line);
+ xcache_queue_free_backdev_io(xcache_io_queue(backdev_io->xcache_io), backdev_io);
+ return 0;
+}
+
+static void xcache_read_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ struct xcache_io *io = backdev_io->xcache_io;
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = addr - start_addr;
+
+ backdev_io->end = xcache_read_lb_cb;
+ backdev_io->line = line;
+ xcache_backdev_submit_io(backdev_io, false, addr, size, buf_offset, OCF_READ);
+}
+
+int xcache_read_generic(struct xcache_io *io)
+{
+ int ret;
+ struct xcache_io_context ctx;
+ struct xcache_io_handler read_handler = {
+ .ctx = &ctx,
+ .res_fn = xcache_read_res,
+ .valid_fn = xcache_read_line_valid,
+ .miss_fn = xcache_read_miss,
+ .hit_fn = xcache_read_hit,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_read_lb,
+ };
+
+ xcache_init_io_ctx(&ctx, io);
+
+ ret = xcache_handle_io(&read_handler);
+ if (ret != 0) {
+ io->error = ret;
+ return ret;
+ }
+
+ xcache_update_stat(&ctx);
+ return 0;
+}
diff --git a/src/engine/xcache_engine_rd.h b/src/engine/xcache_engine_rd.h
new file mode 100644
index 0000000..3b939d4
--- /dev/null
+++ b/src/engine/xcache_engine_rd.h
@@ -0,0 +1,6 @@
+#ifndef XCACHE_ENGINE_RD_H_
+#define XCACHE_ENGINE_RD_H_
+
+int xcache_read_generic(struct xcache_io *io);
+
+#endif /* XCACHE_ENGINE_RD_H_ */
diff --git a/src/engine/xcache_engine_wb.c b/src/engine/xcache_engine_wb.c
new file mode 100644
index 0000000..cd07c96
--- /dev/null
+++ b/src/engine/xcache_engine_wb.c
@@ -0,0 +1,211 @@
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_cache_line.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+
+#include "xcache_engine_common.h"
+#include "xcache_engine_wb.h"
+#include "xcache_engine_flush.h"
+#include "../xcache_queue.h"
+#include "../metadata/xcache_metadata.h"
+
+static bool xcache_wb_update_metadata(ocf_cache_t cache, ocf_core_t core,
+ ocf_cache_line_t line, uint8_t start_sector, uint8_t last_sector)
+{
+ ocf_part_id_t part_id = PARTITION_DEFAULT;
+ struct ocf_part *part = &cache->user_parts[part_id].part;
+ bool need_flush = false;
+
+ bool line_was_dirty;
+
+ ocf_metadata_start_collision_shared_access(cache, line);
+ if (metadata_set_valid_sec_changed(cache, line, start_sector, last_sector)) {
+ env_atomic_inc(&core->runtime_meta->cached_clines);
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ }
+
+ if (metadata_set_dirty_sec_changed(cache, line, start_sector, last_sector,
+ &line_was_dirty)) {
+ if (!line_was_dirty) {
+ /*
+ * If this is first dirty cline set dirty timestamp
+ */
+ if (!env_atomic64_read(&core->runtime_meta->dirty_since))
+ env_atomic64_cmpxchg(
+ &core->runtime_meta->dirty_since, 0,
+ env_ticks_to_secs(env_get_tick_count()));
+
+ /*
+ * Update the number of dirty cached data for that
+ * core object
+ */
+ env_atomic_inc(&core->runtime_meta->dirty_clines);
+
+ /*
+ * increment dirty clines statistic for given cline
+ */
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+
+ need_flush = true;
+ }
+ }
+ ocf_metadata_end_collision_shared_access(cache, line);
+ return need_flush;
+}
+
+static void xcache_wb_flush_metadata_end(void *flush_io, int error)
+{
+ struct xcache_io *io = (struct xcache_io *)flush_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ struct ocf_alock *lock = ocf_cache_line_concurrency(cache);
+ ocf_cache_line_t *flush_lines = xcache_io_get_flush_line(io);
+ uint64_t i;
+
+ for (i = 0; i < io->flush_line_num; i++) {
+ xcache_unlock_wr(lock, flush_lines[i]);
+ }
+ xcache_io_free_flush_line(io);
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "wb flush metadata failed with error %d\n", error);
+ io->error = error;
+ }
+
+ xcache_io_put(io);
+}
+
+static int xcache_wb_hit_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ uint64_t addr = cb_arg->addr;
+ uint64_t len = cb_arg->size;
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_core_t core = xcache_io_core(io);
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ int ret = 0;
+ ocf_cache_line_t line = addr_to_cache_line(cache, addr);
+
+ uint64_t offset;
+ uint8_t start_sector, last_sector;
+
+ xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+
+ if (cb_arg->error) {
+ ocf_cache_log(cache, log_err, "wb write hit failed with error %d\n", cb_arg->error);
+ ret = cb_arg->error;
+ io->error = cb_arg->error;
+ ocf_core_stats_cache_error_update(core, OCF_WRITE);
+ goto out;
+ }
+
+ offset = xcache_addr_offset(cache, addr);
+ start_sector = BYTES_TO_SECTORS(offset);
+ last_sector = BYTES_TO_SECTORS(offset + len - 1);
+
+ if (xcache_wb_update_metadata(cache, core, line, start_sector, last_sector)) {
+ ret = xcache_io_add_flush_line(io, line);
+ if (ret != 0) {
+ ocf_cache_log(cache, log_err, "wb metadata add flush line failed with error %d\n", ret);
+ io->error = ret;
+ goto out;
+ }
+ if (xcache_metadata_should_flush(io)) {
+ xcache_io_metadata_flush(io, xcache_wb_flush_metadata_end);
+ }
+ return 0;
+ }
+
+out:
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+ return ret;
+}
+
+static int xcache_wb_hit(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t offset, uint64_t size, uint64_t buf_offset)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+
+ uint64_t addr;
+
+ addr = cache_line_to_addr(cache, line, offset);
+ base_io->end = xcache_wb_hit_cb;
+ xcache_backdev_submit_io(base_io, true, addr, size, buf_offset, OCF_WRITE);
+ return 0;
+}
+
+static int xcache_wb_miss_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+
+ if (cb_arg->error != 0) {
+ ocf_cache_log(cache, log_err, "wb miss with error %d\n", cb_arg->error);
+ io->error = cb_arg->error;
+ ocf_core_stats_core_error_update(core, OCF_WRITE);
+ }
+ xcache_queue_free_backdev_io(xcache_io_queue(base_io->xcache_io), base_io);
+ return 0;
+}
+
+static int xcache_wb_miss(struct xcache_backdev_io *base_io, uint64_t addr,
+ uint64_t size, uint64_t buf_offset)
+{
+ base_io->end = xcache_wb_miss_cb;
+ xcache_backdev_submit_io(base_io, false, addr, size, buf_offset, OCF_WRITE);
+ return 0;
+}
+
+static void xcache_wb_res(struct xcache_backdev_io *base_io)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t offset = xcache_addr_offset(cache, base_io->addr);
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = base_io->addr - start_addr;
+
+ xcache_wb_hit(base_io, base_io->line, offset, base_io->size, buf_offset);
+}
+
+// bypassing dirty sectors to the core would cost an additional cache io to update the valid bits, so load-balance only clean sectors
+static int xcache_wb_need_lb(ocf_cache_t cache, struct xcache_line_range *range)
+{
+ return !metadata_test_dirty_sec(cache, range->cache_line, range->start_sector, range->last_sector);
+}
+
+static void xcache_wb_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
+int xcache_wb(struct xcache_io *io)
+{
+ int ret;
+ struct xcache_io_context ctx;
+ struct xcache_io_handler wb_handler = {
+ .ctx = &ctx,
+ .res_fn = xcache_wb_res,
+ .valid_fn = NULL,
+ .miss_fn = xcache_wb_miss,
+ .hit_fn = xcache_wb_hit,
+ .need_lb_fn = xcache_wb_need_lb,
+ .lb_fn = xcache_wb_lb,
+ };
+
+ mark_flush();
+
+ io->flush_line_num = 0;
+ xcache_init_io_ctx(&ctx, io);
+
+ ret = xcache_handle_io(&wb_handler);
+ if (ret != 0) {
+ return ret;
+ }
+
+ xcache_update_stat(&ctx);
+ return 0;
+}
diff --git a/src/engine/xcache_engine_wb.h b/src/engine/xcache_engine_wb.h
new file mode 100644
index 0000000..26dba02
--- /dev/null
+++ b/src/engine/xcache_engine_wb.h
@@ -0,0 +1,6 @@
+#ifndef XCACHE_ENGINE_WB_H_
+#define XCACHE_ENGINE_WB_H_
+
+int xcache_wb(struct xcache_io *io);
+
+#endif /* XCACHE_ENGINE_WB_H_ */
diff --git a/src/engine/xcache_engine_wt.c b/src/engine/xcache_engine_wt.c
new file mode 100644
index 0000000..2c08c5f
--- /dev/null
+++ b/src/engine/xcache_engine_wt.c
@@ -0,0 +1,210 @@
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "../utils/utils_cache_line.h"
+#include "../metadata/metadata.h"
+
+#include "xcache_engine_wt.h"
+#include "xcache_engine_common.h"
+#include "xcache_engine_flush.h"
+#include "../xcache_queue.h"
+
+static void xcache_wt_update_metadata(ocf_cache_t cache, ocf_core_t core,
+ ocf_cache_line_t line, uint8_t start_sector, uint8_t last_sector)
+{
+ ocf_part_id_t part_id = PARTITION_DEFAULT;
+ struct ocf_part *part = &cache->user_parts[part_id].part;
+
+ bool line_is_clean;
+
+ ocf_metadata_start_collision_shared_access(cache, line);
+ // ocf_set_valid_map_info
+ if (metadata_set_valid_sec_changed(cache, line, start_sector, last_sector)) {
+ env_atomic_inc(&core->runtime_meta->cached_clines);
+ env_atomic_inc(&core->runtime_meta->
+ part_counters[part_id].cached_clines);
+ }
+
+ // set_cache_line_clean
+ if (metadata_clear_dirty_sec_changed(cache, line, start_sector, last_sector,
+ &line_is_clean)) {
+ if (line_is_clean) {
+ /*
+ * Update the number of dirty cached data for that
+ * core object
+ */
+ if (env_atomic_dec_and_test(&core->runtime_meta->
+ dirty_clines)) {
+ /*
+ * If this is last dirty cline reset dirty
+ * timestamp
+ */
+ env_atomic64_set(&core->runtime_meta->
+ dirty_since, 0);
+ }
+
+ /*
+ * decrement dirty clines statistic for given cline
+ */
+ env_atomic_dec(&core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+ }
+ }
+ ocf_metadata_end_collision_shared_access(cache, line);
+}
+
+static int xcache_wt_hit_core_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_core_t core = xcache_io_core(io);
+ ocf_cache_t cache = ocf_core_get_cache(core);
+ int ret = 0;
+
+ if (cb_arg->error) {
+ ret = cb_arg->error;
+ ocf_cache_log(cache, log_err, "wt hit core failed with error %d\n", cb_arg->error);
+ io->error = cb_arg->error;
+ ocf_core_stats_core_error_update(core, OCF_WRITE);
+ }
+
+ xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return ret;
+}
+
+static int xcache_wt_hit_cache_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ uint64_t addr = cb_arg->addr;
+ uint64_t len = cb_arg->size;
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+ ocf_cache_line_t line = base_io->line;
+ int ret = 0;
+
+ uint64_t offset;
+ uint8_t start_sector, last_sector;
+
+ if (cb_arg->error != 0) {
+ ocf_cache_log(cache, log_err, "wt hit cache with error %d\n", cb_arg->error);
+ io->error = cb_arg->error;
+ ocf_core_stats_cache_error_update(core, OCF_WRITE);
+ ret = cb_arg->error;
+ goto out;
+ }
+
+ offset = xcache_addr_offset(cache, addr);
+ start_sector = BYTES_TO_SECTORS(offset);
+ last_sector = BYTES_TO_SECTORS(offset + len - 1);
+
+	xcache_wt_update_metadata(cache, core, line, start_sector, last_sector);
+
+out:
+ xcache_unlock_wr(ocf_cache_line_concurrency(cache), line);
+	xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return ret;
+}
+
+static int xcache_wt_hit_cache(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t offset, uint64_t size, uint64_t buf_offset)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t addr = cache_line_to_addr(cache, line, offset);
+
+ base_io->line = line;
+ base_io->end = xcache_wt_hit_cache_cb;
+ xcache_backdev_submit_io(base_io, true, addr, size, buf_offset, OCF_WRITE);
+ return 0;
+}
+
+static inline int xcache_wt_hit(struct xcache_backdev_io *base_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t offset, uint64_t size, uint64_t buf_offset)
+{
+ return xcache_wt_hit_cache(base_io, line, offset, size, buf_offset);
+}
+
+static void xcache_wt_res(struct xcache_backdev_io *base_io)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ uint64_t offset = xcache_addr_offset(cache, base_io->addr);
+ uint64_t start_addr = xcache_io_start_addr(io);
+ uint64_t buf_offset = base_io->addr - start_addr;
+
+ xcache_wt_hit(base_io, base_io->line, base_io->addr, offset, base_io->size, buf_offset);
+}
+
+static int xcache_wt_miss_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_io *io = base_io->xcache_io;
+ ocf_cache_t cache = xcache_io_cache(io);
+ ocf_core_t core = xcache_io_core(io);
+
+ if (cb_arg->error != 0) {
+ ocf_cache_log(cache, log_err, "wt miss with error %d\n", cb_arg->error);
+ io->error = cb_arg->error;
+ ocf_core_stats_core_error_update(core, OCF_WRITE);
+ }
+	xcache_queue_free_backdev_io(xcache_io_queue(io), base_io);
+ return 0;
+}
+
+static int xcache_wt_core(struct xcache_io_context *ctx)
+{
+ struct xcache_io *io = ctx->io;
+ uint64_t addr = xcache_io_start_addr(io);
+ uint64_t size = xcache_io_size(io);
+ ocf_cache_t cache = xcache_io_cache(io);
+ struct xcache_backdev_io *base_io = NULL;
+
+ base_io = xcache_queue_alloc_backdev_io(xcache_io_queue(io));
+ if (base_io == NULL) {
+ ocf_cache_log(cache, log_err, "alloc base io failed\n");
+ return -ENOMEM;
+ }
+ base_io->xcache_io = io;
+ base_io->data = io->data;
+ base_io->end = xcache_wt_miss_cb;
+ xcache_backdev_submit_io(base_io, false, addr, size, 0, OCF_WRITE);
+ return 0;
+}
+
+static void xcache_wt_lb(struct xcache_backdev_io *backdev_io, ocf_cache_line_t line,
+ uint64_t addr, uint64_t size)
+{
+ xcache_wr_lb_common(backdev_io, line, addr, size);
+}
+
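+/*
+ * Write-through entry point: the whole request is first submitted to the
+ * core device (xcache_wt_core), then xcache_handle_io() writes the mapped
+ * portions to the cache device; each completed cache write marks its
+ * sectors valid and clean.
+ */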
+int xcache_wt(struct xcache_io *io)
+{
+ struct xcache_io_context ctx;
+ struct xcache_io_handler wt_handler = {
+ .ctx = &ctx,
+ .res_fn = xcache_wt_res,
+ .valid_fn = NULL,
+ .miss_fn = NULL,
+ .hit_fn = xcache_wt_hit_cache,
+ .need_lb_fn = NULL,
+ .lb_fn = xcache_wt_lb,
+ };
+ int ret;
+
+ mark_flush();
+
+ xcache_init_io_ctx(&ctx, io);
+
+	ret = xcache_wt_core(&ctx);
+	if (ret != 0) {
+		io->error = ret;
+		return ret;
+	}
+
+ ret = xcache_handle_io(&wt_handler);
+ if (ret != 0) {
+ io->error = ret;
+ return ret;
+ }
+
+ ctx.core_bytes = xcache_io_size(io);
+ xcache_update_stat(&ctx);
+ return 0;
+}
diff --git a/src/engine/xcache_engine_wt.h b/src/engine/xcache_engine_wt.h
new file mode 100644
index 0000000..d2cc7e7
--- /dev/null
+++ b/src/engine/xcache_engine_wt.h
@@ -0,0 +1,6 @@
+#ifndef XCACHE_ENGINE_WT_H_
+#define XCACHE_ENGINE_WT_H_
+
+int xcache_wt(struct xcache_io *io);
+
+#endif /* XCACHE_ENGINE_WT_H_ */
diff --git a/src/evicting/deadline.c b/src/evicting/deadline.c
new file mode 100644
index 0000000..0050faf
--- /dev/null
+++ b/src/evicting/deadline.c
@@ -0,0 +1,172 @@
+#include <stdint.h>
+
+#include "deadline.h"
+#include "evicting_ops.h"
+#include "evicting_helper.h"
+#include "../xcache.h"
+
+#define STALE_SECS 600
+#define EVICT_BUNCH 10
+#define CLEAN_BUNCH 32
+
+struct deadline_ctrl {
+ uint32_t *access_tick;
+ uint64_t line_num;
+ xcache_line_t evict_iter_line;
+ xcache_line_t clean_iter_line;
+};
+
+struct deadline_iter {
+ xcache_line_t *lines;
+ int line_num;
+ int iter_bunch;
+ xcache_line_t *iter_start;
+ bool line_dirty;
+};
+
+static int deadline_init(xcache_context_t *ctx)
+{
+ struct deadline_ctrl *ctrl = env_malloc(sizeof(struct deadline_ctrl), 0);
+
+ if (ctrl == NULL) {
+ return -1;
+ }
+ ctrl->line_num = evicting_line_num(ctx);
+
+ ctrl->access_tick = env_zalloc(sizeof(uint32_t) * ctrl->line_num, 0);
+ if (ctrl->access_tick == NULL) {
+ goto alloc_tick_fail;
+ }
+
+ ctrl->evict_iter_line = 0;
+ ctrl->clean_iter_line = 0;
+ xcache_set_evicting(ctx, (void *)ctrl);
+ return 0;
+
+alloc_tick_fail:
+ env_free(ctrl);
+ return -1;
+}
+
+static void deadline_fini(xcache_context_t *ctx)
+{
+ struct deadline_ctrl *ctrl = (struct deadline_ctrl *)xcache_get_evicting(ctx);
+
+ if (ctrl == NULL) {
+ return;
+ }
+
+ xcache_set_evicting(ctx, NULL);
+
+ if (ctrl->access_tick != NULL) {
+ env_free(ctrl->access_tick);
+ ctrl->access_tick = NULL;
+ }
+
+ env_free(ctrl);
+}
+
+static void deadline_line_accessed(xcache_context_t *ctx, xcache_line_t line)
+{
+ struct deadline_ctrl *ctrl = (struct deadline_ctrl *)xcache_get_evicting(ctx);
+
+ ctrl->access_tick[line] = env_ticks_to_secs(env_get_tick_count());
+}
+
+static void deadline_line_dirty(xcache_context_t *ctx, xcache_line_t line)
+{
+ return;
+}
+
+static void deadline_line_clean(xcache_context_t *ctx, xcache_line_t line)
+{
+ return;
+}
+
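+/*
+ * Scan up to iter_bunch lines in a circular fashion starting at
+ * *iter_start, collecting at most line_num lines that have been idle for
+ * STALE_SECS, can be write-trylocked and whose dirty state matches
+ * line_dirty. Returns the number of lines collected; collected lines
+ * stay locked for the caller.
+ */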
+static int deadline_get_lines(xcache_context_t *ctx, struct deadline_iter *iter)
+{
+ struct deadline_ctrl *ctrl = xcache_get_evicting(ctx);
+	uint32_t now = env_ticks_to_secs(env_get_tick_count());
+	uint32_t stale = (now > STALE_SECS) ? (now - STALE_SECS) : 0;
+ uint64_t i, j;
+ xcache_line_t cline;
+
+ j = 0;
+ for (i = 0, cline = *(iter->iter_start);
+ i < iter->iter_bunch;
+ i++, cline = (cline + 1) % ctrl->line_num)
+ {
+ if (ctrl->access_tick[cline] > stale) {
+ continue;
+ }
+
+ if (!evicting_trylock_line(ctx, cline)) {
+ continue;
+ }
+
+ if (evicting_test_dirty(ctx, cline) != iter->line_dirty) {
+ evicting_unlock_line(ctx, cline);
+ continue;
+ }
+
+ iter->lines[j++] = cline;
+ if (j >= iter->line_num) {
+ *(iter->iter_start) = (cline + 1) % ctrl->line_num;
+ return j;
+ }
+ }
+
+ *(iter->iter_start) = cline;
+ return j;
+}
+
+static int deadline_line_to_clean(xcache_context_t *ctx, xcache_line_t *lines, int line_num)
+{
+ struct deadline_ctrl *ctrl = xcache_get_evicting(ctx);
+
+ struct deadline_iter iter = {
+ .lines = lines,
+ .line_num = line_num,
+ .iter_bunch = CLEAN_BUNCH,
+ .iter_start = &(ctrl->clean_iter_line),
+ .line_dirty = true,
+ };
+ return deadline_get_lines(ctx, &iter);
+}
+
+
+static int deadline_line_to_evict(xcache_context_t *ctx, xcache_line_t *lines, int line_num)
+{
+ struct deadline_ctrl *ctrl = xcache_get_evicting(ctx);
+
+ struct deadline_iter iter = {
+ .lines = lines,
+ .line_num = line_num,
+ .iter_bunch = EVICT_BUNCH,
+ .iter_start = &(ctrl->evict_iter_line),
+ .line_dirty = false,
+ };
+ return deadline_get_lines(ctx, &iter);
+}
+
+static struct evicting_policy_ops g_deadline_ops = {
+ .init = deadline_init,
+ .fini = deadline_fini,
+ .line_accessed = deadline_line_accessed,
+ .line_dirty = deadline_line_dirty,
+ .line_clean = deadline_line_clean,
+ .line_to_clean = deadline_line_to_clean,
+ .line_to_evict = deadline_line_to_evict,
+};
+
+void set_deadline_policy(void)
+{
+ evicting_set(&g_deadline_ops);
+}
diff --git a/src/evicting/deadline.h b/src/evicting/deadline.h
new file mode 100644
index 0000000..887737b
--- /dev/null
+++ b/src/evicting/deadline.h
@@ -0,0 +1,6 @@
+#ifndef DEADLINE_H_
+#define DEADLINE_H_
+
+void set_deadline_policy(void);
+
+#endif
diff --git a/src/evicting/evicting.c b/src/evicting/evicting.c
new file mode 100644
index 0000000..542693f
--- /dev/null
+++ b/src/evicting/evicting.c
@@ -0,0 +1,3 @@
+#include "evicting_ops.h"
+
+struct evicting_policy_ops *g_evicting_policy = NULL;
diff --git a/src/evicting/evicting_helper.h b/src/evicting/evicting_helper.h
new file mode 100644
index 0000000..e6defa1
--- /dev/null
+++ b/src/evicting/evicting_helper.h
@@ -0,0 +1,32 @@
+#ifndef EVICTING_HELPER_H_
+#define EVICTING_HELPER_H_
+
+#include "../utils/utils_alock.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
+#include "../ocf_cache_priv.h"
+#include "../metadata/metadata_status.h"
+
+static inline bool evicting_test_dirty(xcache_context_t *ctx, xcache_line_t line)
+{
+ return metadata_test_dirty(ctx->cache, line);
+}
+
+static inline bool evicting_trylock_line(xcache_context_t *ctx, xcache_line_t line)
+{
+ struct ocf_alock *lock = ocf_cache_line_concurrency(ctx->cache);
+
+ return ocf_alock_trylock_entry_wr(lock, line);
+}
+
+static inline void evicting_unlock_line(xcache_context_t *ctx, xcache_line_t line)
+{
+ struct ocf_alock *lock = ocf_cache_line_concurrency(ctx->cache);
+
+ xcache_unlock_wr(lock, line);
+}
+
+static inline uint64_t evicting_line_num(xcache_context_t *ctx)
+{
+ return ctx->cache->device->collision_table_entries;
+}
+#endif
diff --git a/src/evicting/evicting_ops.h b/src/evicting/evicting_ops.h
new file mode 100644
index 0000000..bd9c3ec
--- /dev/null
+++ b/src/evicting/evicting_ops.h
@@ -0,0 +1,61 @@
+#ifndef EVICTING_OPS_H_
+#define EVICTING_OPS_H_
+
+#include <stddef.h>
+#include "../xcache.h"
+
+struct evicting_policy_ops {
+ int (*init)(xcache_context_t *ctx);
+ void (*line_accessed)(xcache_context_t *ctx, xcache_line_t line);
+ void (*line_dirty)(xcache_context_t *ctx, xcache_line_t line);
+ void (*line_clean)(xcache_context_t *ctx, xcache_line_t line);
+ int (*line_to_clean)(xcache_context_t *ctx, xcache_line_t *lines, int line_num);
+ int (*line_to_evict)(xcache_context_t *ctx, xcache_line_t *lines, int line_num);
+ void (*fini)(xcache_context_t *ctx);
+};
+
+extern struct evicting_policy_ops *g_evicting_policy;
+
+static inline void evicting_set(struct evicting_policy_ops *policy)
+{
+ g_evicting_policy = policy;
+}
+
+static inline int evicting_init(xcache_context_t *ctx)
+{
+	return g_evicting_policy->init(ctx);
+}
+
+static inline void evicting_line_accessed(xcache_context_t *ctx, xcache_line_t line)
+{
+ g_evicting_policy->line_accessed(ctx, line);
+}
+
+static inline void evicting_line_dirty(xcache_context_t *ctx, xcache_line_t line)
+{
+ g_evicting_policy->line_dirty(ctx, line);
+}
+
+static inline void evicting_line_clean(xcache_context_t *ctx, xcache_line_t line)
+{
+ g_evicting_policy->line_clean(ctx, line);
+}
+
+static inline int evicting_line_to_clean(xcache_context_t *ctx,
+ xcache_line_t *lines, int line_num)
+{
+ return g_evicting_policy->line_to_clean(ctx, lines, line_num);
+}
+
+static inline int evicting_line_to_evict(xcache_context_t *ctx,
+ xcache_line_t *lines, int line_num)
+{
+ return g_evicting_policy->line_to_evict(ctx, lines, line_num);
+}
+
+static inline void evicting_fini(xcache_context_t *ctx)
+{
+	g_evicting_policy->fini(ctx);
+}
+
+#endif
diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c
index 15287e7..efef69c 100644
--- a/src/metadata/metadata_raw.c
+++ b/src/metadata/metadata_raw.c
@@ -349,6 +349,8 @@ struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *req;
ocf_req_end_t complete;
+ void *io;
+ ocf_metadata_io_ctx_end_t io_end;
env_atomic flush_req_cnt;
int error;
};
@@ -369,8 +371,7 @@ static void _raw_ram_flush_do_asynch_io_complete(ocf_cache_t cache,
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
/* Call metadata flush completed call back */
- ctx->req->error |= ctx->error;
- ctx->complete(ctx->req, ctx->error);
+ ctx->io_end(ctx->io, ctx->error);
env_free(ctx);
}
@@ -429,17 +430,17 @@ int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
return 0;
}
-static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
+static void __raw_ram_flush_do_asynch_add_pages(struct ocf_metadata_line_getter *getter,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
- int *pages_to_flush) {
+ int *pages_to_flush)
+{
int i, j = 0;
- int line_no = req->core_line_count;
- struct ocf_map_info *map;
+ int line_no = line_getter_line_num(getter);
+ ocf_cache_line_t line;
for (i = 0; i < line_no; i++) {
- map = &req->map[i];
- if (map->flush) {
- pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
+ if (line_getter_flush_line(getter, i, &line)) {
+ pages_tab[j] = _RAW_RAM_PAGE(raw, line);
j++;
}
}
@@ -447,37 +448,31 @@ static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
*pages_to_flush = j;
}
-static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
- struct ocf_request *req, struct ocf_metadata_raw *raw,
- ocf_req_end_t complete)
+static int _raw_ram_flush_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
- int line_no = req->core_line_count;
+ int line_no = line_getter_line_num(line_getter);
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
struct _raw_ram_flush_ctx *ctx;
- ENV_BUG_ON(!complete);
+ ENV_BUG_ON(!io_ctx->io_end);
OCF_DEBUG_TRACE(cache);
- if (!req->info.flush_metadata) {
- /* Nothing to flush call flush callback */
- complete(req, 0);
- return 0;
- }
-
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
- ctx->req = req;
- ctx->complete = complete;
+ ctx->io = io_ctx->io;
+ ctx->io_end = io_ctx->io_end;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
@@ -487,7 +482,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
- complete(req, -OCF_ERR_NO_MEM);
+ io_context_end(io_ctx, -OCF_ERR_NO_MEM);
return -OCF_ERR_NO_MEM;
}
}
@@ -496,7 +491,7 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
* to prevent freeing of asynchronous context
*/
- __raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
+ __raw_ram_flush_do_asynch_add_pages(line_getter, pages_tab, raw,
&pages_to_flush);
env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
@@ -526,9 +521,9 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
env_atomic_inc(&ctx->flush_req_cnt);
- result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
+ result |= metadata_io_write_i_asynch(cache, io_ctx->queue, ctx,
raw->ssd_pages_offset + start_page, count,
- req->ioi.io.flags,
+ io_ctx->io_flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);
@@ -547,6 +542,43 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
return result;
}
+static int req_line_num(void *getter)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ return req->core_line_count;
+}
+
+static bool req_flush_line(void *getter, int index, ocf_cache_line_t *line)
+{
+ struct ocf_request *req = (struct ocf_request *)getter;
+
+ if (!req->map[index].flush) {
+ return false;
+ }
+
+ *line = req->map[index].coll_idx;
+ return true;
+}
+
+static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
+ struct ocf_request *req, struct ocf_metadata_raw *raw,
+ ocf_req_end_t complete)
+{
+ struct ocf_metadata_io_context io_ctx = {
+ .io = (void *)req,
+ .io_flags = req->ioi.io.flags,
+ .io_end = (ocf_metadata_io_ctx_end_t)complete,
+ .queue = req->io_queue,
+ };
+ struct ocf_metadata_line_getter line_getter = {
+ .getter = (void *)req,
+ .get_line_num = req_line_num,
+ .get_flush_line = req_flush_line,
+	};
+
+	if (!req->info.flush_metadata) {
+		/* Nothing to flush - complete the request right away */
+		complete(req, 0);
+		return 0;
+	}
+
+	return _raw_ram_flush_asynch_common(cache, raw, &io_ctx, &line_getter);
+}
+
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
@@ -566,6 +598,7 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
.flush_do_asynch = _raw_ram_flush_do_asynch,
+ .flush_do_asynch_common = _raw_ram_flush_asynch_common
},
[metadata_raw_type_dynamic] = {
.init = raw_dynamic_init,
diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h
index 0357774..57f7b75 100644
--- a/src/metadata/metadata_raw.h
+++ b/src/metadata/metadata_raw.h
@@ -93,6 +93,35 @@ struct ocf_metadata_raw {
struct ocf_alock *mio_conc;
};
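+
+/*
+ * Helpers that let the raw-RAM flush path run without a struct
+ * ocf_request: a line getter enumerates the cache lines whose collision
+ * metadata must be flushed, and an IO context carries the queue, IO flags
+ * and completion callback on behalf of the submitter.
+ */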
+struct ocf_metadata_line_getter {
+ void *getter;
+ int (*get_line_num)(void *getter);
+ bool (*get_flush_line)(void *getter, int index, ocf_cache_line_t *line);
+};
+
+static inline int line_getter_line_num(struct ocf_metadata_line_getter *getter)
+{
+ return getter->get_line_num(getter->getter);
+}
+
+static inline bool line_getter_flush_line(struct ocf_metadata_line_getter *getter,
+ int index, ocf_cache_line_t *line)
+{
+ return getter->get_flush_line(getter->getter, index, line);
+}
+
+typedef void (*ocf_metadata_io_ctx_end_t)(void *io, int error);
+struct ocf_metadata_io_context {
+ void *io;
+ int io_flags;
+ ocf_queue_t queue;
+ ocf_metadata_io_ctx_end_t io_end;
+};
+
+static inline void io_context_end(struct ocf_metadata_io_context *ctx, int error)
+{
+ ctx->io_end(ctx->io, error);
+}
/**
* RAW container interface
*/
@@ -137,8 +166,12 @@ struct raw_iface {
int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);
+
+ int (*flush_do_asynch_common)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ struct ocf_metadata_io_context *io_ctx, struct ocf_metadata_line_getter *line_getter);
};
+
/**
* @brief Initialize RAW instance
*
@@ -288,6 +321,13 @@ static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
return raw->iface->flush_do_asynch(cache, req, raw, complete);
}
+static inline int ocf_metadata_raw_flush_do_asynch_common(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw, struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
+{
+ return raw->iface->flush_do_asynch_common(cache, raw, io_ctx, line_getter);
+}
+
/*
* Check if line is valid for specified RAW descriptor
*/
diff --git a/src/metadata/xcache_metadata.c b/src/metadata/xcache_metadata.c
new file mode 100644
index 0000000..53f385c
--- /dev/null
+++ b/src/metadata/xcache_metadata.c
@@ -0,0 +1,88 @@
+#include "ocf/ocf.h"
+
+#include "xcache_metadata.h"
+#include "metadata.h"
+#include "metadata_internal.h"
+#include "../engine/xcache_engine_common.h" /* xcache_io_xxx */
+#include "../ocf_cache_priv.h"
+
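+/*
+ * Flush collision-segment metadata pages for the lines reported by
+ * @line_getter. This is the xcache counterpart of the request-based
+ * flush path, decoupled from struct ocf_request via the IO context and
+ * line getter abstractions.
+ */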
+void xcache_metadata_flush_asynch(struct ocf_cache *cache,
+ struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter)
+{
+ int result = 0;
+ struct ocf_metadata_ctrl *ctrl = NULL;
+
+ ctrl = (struct ocf_metadata_ctrl *) cache->metadata.priv;
+ result |= ocf_metadata_raw_flush_do_asynch_common(cache,
+ &(ctrl->raw_desc[metadata_segment_collision]),
+ io_ctx, line_getter);
+ if (result) {
+ ocf_metadata_error(cache);
+ ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n");
+ }
+}
+
+static inline bool is_power_of_two(uint64_t n)
+{
+	return n != 0 && ((n - 1) & n) == 0;
+}
+
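+/*
+ * Record a cache line for the post-IO metadata flush. The first
+ * INLINE_FLUSH_LINES entries live in the embedded io->flush_lines array;
+ * on overflow a heap array is allocated and doubled at every power-of-two
+ * count, with its pointer stashed in the inline storage (this assumes
+ * INLINE_FLUSH_LINES itself is a power of two).
+ */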
+int xcache_io_add_flush_line(struct xcache_io *io, ocf_cache_line_t line)
+{
+	ocf_cache_line_t *flush_lines, *old_lines;
+	int i;
+
+	if (io->flush_line_num < INLINE_FLUSH_LINES) {
+		flush_lines = io->flush_lines;
+	} else if (is_power_of_two(io->flush_line_num)) {
+		/* The inline (or current heap) array is full - double the capacity */
+		old_lines = xcache_io_get_flush_line(io);
+		flush_lines = env_malloc(sizeof(ocf_cache_line_t) *
+				(io->flush_line_num << 1), 0);
+		if (flush_lines == NULL) {
+			return -ENOMEM;
+		}
+		for (i = 0; i < io->flush_line_num; i++) {
+			flush_lines[i] = old_lines[i];
+		}
+		if (io->flush_line_num > INLINE_FLUSH_LINES) {
+			env_free(old_lines);
+		}
+		*(ocf_cache_line_t **)io->flush_lines = flush_lines;
+	} else {
+		flush_lines = *(ocf_cache_line_t **)io->flush_lines;
+	}
+
+ flush_lines[io->flush_line_num++] = line;
+ return 0;
+}
+
+static int io_line_num(void *getter)
+{
+ struct xcache_io *io = (struct xcache_io *)getter;
+
+ return io->flush_line_num;
+}
+
+static bool io_flush_line(void *getter, int index, ocf_cache_line_t *line)
+{
+ struct xcache_io *io = (struct xcache_io *)getter;
+ ocf_cache_line_t *flush_lines;
+
+ flush_lines = xcache_io_get_flush_line(io);
+	*line = flush_lines[index];
+ return true;
+}
+
+void xcache_io_metadata_flush(struct xcache_io *io, ocf_metadata_io_ctx_end_t io_end)
+{
+ struct ocf_metadata_io_context io_ctx = {
+ .io = (void *)io,
+ .io_flags = xcache_io_flags(io),
+ .io_end = io_end,
+ .queue = xcache_io_queue(io),
+ };
+ struct ocf_metadata_line_getter line_getter = {
+ .getter = (void *)io,
+ .get_line_num = io_line_num,
+ .get_flush_line = io_flush_line,
+ };
+
+ xcache_io_get(io);
+ xcache_metadata_flush_asynch(xcache_io_cache(io), &io_ctx, &line_getter);
+}
diff --git a/src/metadata/xcache_metadata.h b/src/metadata/xcache_metadata.h
new file mode 100644
index 0000000..e82580e
--- /dev/null
+++ b/src/metadata/xcache_metadata.h
@@ -0,0 +1,47 @@
+#ifndef __XCACHE_METADATA_H__
+#define __XCACHE_METADATA_H__
+
+#include "ocf/ocf.h"
+#include "metadata_raw.h"
+
+#include "ocf/xcache.h"
+
+void xcache_metadata_flush_asynch(struct ocf_cache *cache,
+ struct ocf_metadata_io_context *io_ctx,
+ struct ocf_metadata_line_getter *line_getter);
+
+int xcache_io_add_flush_line(struct xcache_io *io, ocf_cache_line_t line);
+static inline void xcache_io_free_flush_line(struct xcache_io *io)
+{
+ if (io->flush_line_num <= INLINE_FLUSH_LINES) {
+ return;
+ }
+ env_free(*(ocf_cache_line_t **)io->flush_lines);
+}
+
+static inline ocf_cache_line_t *get_inline_flush_lines(struct xcache_io *io)
+{
+ return io->flush_lines;
+}
+
+static inline ocf_cache_line_t *get_alloc_flush_lines(struct xcache_io *io)
+{
+ return *(ocf_cache_line_t **)io->flush_lines;
+}
+
+static inline ocf_cache_line_t *xcache_io_get_flush_line(struct xcache_io *io)
+{
+ if (io->flush_line_num <= INLINE_FLUSH_LINES) {
+ return get_inline_flush_lines(io);
+ } else {
+ return get_alloc_flush_lines(io);
+ }
+}
+
+static inline bool xcache_metadata_should_flush(struct xcache_io *io)
+{
+ return (env_atomic_read(&io->remaining) == 1);
+}
+
+void xcache_io_metadata_flush(struct xcache_io *io, ocf_metadata_io_ctx_end_t io_end);
+#endif
diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c
index 54a2a67..591ffcd 100644
--- a/src/mngt/ocf_mngt_cache.c
+++ b/src/mngt/ocf_mngt_cache.c
@@ -124,6 +124,8 @@ struct ocf_cache_attach_context {
*/
bool concurrency_inited : 1;
+
+ bool xcache_inited : 1;
} flags;
struct {
@@ -975,6 +977,12 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
context->flags.attached_metadata_inited = true;
+ ret = xcache_init(cache);
+ if (ret != 0) {
+ OCF_PL_FINISH_RET(pipeline, ret);
+ }
+	context->flags.xcache_inited = true;
+
ret = ocf_concurrency_init(cache);
if (ret)
OCF_PL_FINISH_RET(pipeline, ret);
@@ -1126,6 +1134,9 @@ static void _ocf_mngt_attach_handle_error(
if (context->flags.device_alloc)
env_vfree(cache->device);
+ if (context->flags.xcache_inited)
+ xcache_fini(cache);
+
ocf_pipeline_destroy(cache->stop_pipeline);
}
diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h
index b0a6f77..07fcd63 100644
--- a/src/ocf_cache_priv.h
+++ b/src/ocf_cache_priv.h
@@ -139,6 +139,7 @@ struct ocf_cache {
} __attribute__((aligned(64)));
// This should be on it's own cacheline ideally
env_atomic last_access_ms;
+ void *xcache_ctx;
};
static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
diff --git a/src/ocf_lru.c b/src/ocf_lru.c
index e9c3882..8e323c0 100644
--- a/src/ocf_lru.c
+++ b/src/ocf_lru.c
@@ -221,7 +221,7 @@ void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
node->next = end_marker;
}
-static struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
uint32_t lru_idx, bool clean)
{
if (part->id == PARTITION_FREELIST)
@@ -261,7 +261,7 @@ static inline void ocf_lru_move(ocf_cache_t cache, ocf_cache_line_t cline,
add_lru_head(cache, dst_list, cline);
}
-static void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_part, struct ocf_part *dst_part)
{
uint32_t lru_list = (cline % OCF_NUM_LRU_LISTS);
@@ -658,7 +658,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_cleaner_fire(cache, &attribs);
}
-static void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
ocf_core_t core;
diff --git a/src/ocf_lru.h b/src/ocf_lru.h
index a71b5fd..ae50b5e 100644
--- a/src/ocf_lru.h
+++ b/src/ocf_lru.h
@@ -33,4 +33,11 @@ void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
+void ocf_lru_repart_locked(ocf_cache_t cache, ocf_cache_line_t cline,
+ struct ocf_part *src_part, struct ocf_part *dst_part);
+struct ocf_lru_list *ocf_lru_get_list(struct ocf_part *part,
+ uint32_t lru_idx, bool clean);
+void ocf_lru_invalidate(ocf_cache_t cache, ocf_cache_line_t cline,
+ ocf_core_id_t core_id, ocf_part_id_t part_id);
+
#endif
diff --git a/src/ocf_queue.c b/src/ocf_queue.c
index a754d6e..9e89102 100644
--- a/src/ocf_queue.c
+++ b/src/ocf_queue.c
@@ -51,8 +51,15 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
return result;
}
- list_add(&tmp_queue->list, &cache->io_queues);
+ result = xcache_queue_ctx_init(tmp_queue);
+ if (result) {
+ ocf_queue_seq_cutoff_deinit(tmp_queue);
+ ocf_mngt_cache_put(cache);
+ env_free(tmp_queue);
+ return result;
+ }
+ list_add(&tmp_queue->list, &cache->io_queues);
*queue = tmp_queue;
return 0;
@@ -91,7 +98,7 @@ void ocf_io_handle(struct ocf_io *io, void *opaque)
req->io_if->read(req);
}
-void ocf_queue_run_single(ocf_queue_t q)
+void __attribute__((weak)) ocf_queue_run_single(ocf_queue_t q)
{
struct ocf_request *io_req = NULL;
diff --git a/src/ocf_queue_priv.h b/src/ocf_queue_priv.h
index def5d08..62bf900 100644
--- a/src/ocf_queue_priv.h
+++ b/src/ocf_queue_priv.h
@@ -36,6 +36,8 @@ struct ocf_queue {
env_atomic ref_count;
env_spinlock io_list_lock;
+
+ void *priv1;
} __attribute__((__aligned__(64)));
static inline void ocf_queue_kick(ocf_queue_t queue, bool allow_sync)
diff --git a/src/qos/qos.c b/src/qos/qos.c
new file mode 100644
index 0000000..6ea2da9
--- /dev/null
+++ b/src/qos/qos.c
@@ -0,0 +1,6 @@
+#include "qos.h"
+
+void xcache_qos_init(struct xcache_qos *qos)
+{
+ qos_lb_init(&qos->qos_lb);
+}
diff --git a/src/qos/qos.h b/src/qos/qos.h
new file mode 100644
index 0000000..3b6a691
--- /dev/null
+++ b/src/qos/qos.h
@@ -0,0 +1,27 @@
+#ifndef __QOS_H__
+#define __QOS_H__
+
+#include "qos_lb.h"
+
+struct xcache_qos {
+ struct qos_lb qos_lb;
+};
+
+static inline void xcache_qos_load_add(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_add(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline void xcache_qos_load_sub(struct xcache_qos *qos, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ qos_lb_load_sub(&qos->qos_lb, dev, dir, bytes);
+}
+
+static inline bool xcache_qos_need_lb(struct xcache_qos *qos, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_need_lb(&qos->qos_lb, dir, bytes);
+}
+
+void xcache_qos_init(struct xcache_qos *qos);
+
+#endif
diff --git a/src/qos/qos_lb.h b/src/qos/qos_lb.h
new file mode 100644
index 0000000..bb3bfe4
--- /dev/null
+++ b/src/qos/qos_lb.h
@@ -0,0 +1,143 @@
+#ifndef __QOS_LB_H__
+#define __QOS_LB_H__
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "ocf/xcache.h"
+
+#define CORE_DEFAULT_LOAD_WEIGHT 30
+#define CACHE_DEFAULT_LOAD_WEIGHT 1
+
+#define CORE_DEV 0
+#define CACHE_DEV 1
+
+struct qos_dev_load {
+ env_atomic64 read_inflight_bytes;
+ env_atomic64 write_inflight_bytes;
+ uint32_t read_weight;
+ uint32_t write_weight;
+};
+
+static inline void qos_dev_load_init(struct qos_dev_load *load)
+{
+ env_atomic64_set(&load->read_inflight_bytes, 0);
+ env_atomic64_set(&load->write_inflight_bytes, 0);
+}
+
+static inline void qos_dev_load_add(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_add(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_add(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_dev_load_sub(struct qos_dev_load *load, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ env_atomic64_sub(bytes, &load->read_inflight_bytes);
+ break;
+ case XCACHE_WR:
+ env_atomic64_sub(bytes, &load->write_inflight_bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline uint64_t qos_dev_load_read(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return env_atomic64_read(&load->read_inflight_bytes);
+ case XCACHE_WR:
+ return env_atomic64_read(&load->write_inflight_bytes);
+ default:
+ return 0;
+ }
+}
+
+static inline uint32_t qos_dev_load_weight(struct qos_dev_load *load, enum xcache_dir dir)
+{
+ switch (dir) {
+ case XCACHE_RD:
+ return load->read_weight;
+ case XCACHE_WR:
+ return load->write_weight;
+ default:
+ return 0;
+ }
+}
+
+static inline uint64_t do_cal_load(uint64_t bytes, uint32_t weight)
+{
+ return bytes * weight;
+}
+
+static inline uint64_t qos_dev_load_cal(struct qos_dev_load *load)
+{
+ uint64_t read_inflight_bytes = qos_dev_load_read(load, XCACHE_RD);
+ uint64_t write_inflight_bytes = qos_dev_load_read(load, XCACHE_WR);
+
+ return do_cal_load(read_inflight_bytes, load->read_weight) +
+ do_cal_load(write_inflight_bytes, load->write_weight);
+}
+
+struct qos_lb {
+ struct qos_dev_load cache_load;
+ struct qos_dev_load core_load;
+};
+
+static inline void qos_lb_init(struct qos_lb *qos_lb)
+{
+ qos_dev_load_init(&qos_lb->cache_load);
+ qos_dev_load_init(&qos_lb->core_load);
+ qos_lb->cache_load.read_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->cache_load.write_weight = CACHE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.read_weight = CORE_DEFAULT_LOAD_WEIGHT;
+ qos_lb->core_load.write_weight = CORE_DEFAULT_LOAD_WEIGHT;
+}
+
+static inline void qos_lb_load_add(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_add(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_add(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
+static inline void qos_lb_load_sub(struct qos_lb *qos_lb, int dev, enum xcache_dir dir, uint64_t bytes)
+{
+ switch (dev) {
+ case CACHE_DEV:
+ qos_dev_load_sub(&qos_lb->cache_load, dir, bytes);
+ break;
+ case CORE_DEV:
+ qos_dev_load_sub(&qos_lb->core_load, dir, bytes);
+ break;
+ default:
+ break;
+ }
+}
+
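+/*
+ * Decide whether a request of @bytes should be load-balanced to the core
+ * device: bypass the cache only while its weighted in-flight load exceeds
+ * the core load plus the weighted cost of this request on the core side.
+ */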
+static inline bool qos_need_lb(struct qos_lb *qos_lb, enum xcache_dir dir, uint64_t bytes)
+{
+ return qos_dev_load_cal(&qos_lb->cache_load) > qos_dev_load_cal(&qos_lb->core_load) +
+ do_cal_load(bytes, qos_dev_load_weight(&qos_lb->core_load, dir));
+}
+
+#endif
diff --git a/src/utils/utils_alock.c b/src/utils/utils_alock.c
index 25f41a6..7cecacf 100644
--- a/src/utils/utils_alock.c
+++ b/src/utils/utils_alock.c
@@ -799,3 +799,125 @@ uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);
}
+
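+/*
+ * Write-lock a single cache line for an xcache IO. Returns
+ * OCF_LOCK_ACQUIRED on the fast path; otherwise the io is queued on the
+ * waiters list (prepare_wait_fn runs before queueing, prepare_wake_fn is
+ * invoked from xcache_unlock_wr when the lock is handed over).
+ */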
+int xcache_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io)
+{
+ struct ocf_alock_waiter *waiter;
+ unsigned long flags = 0;
+ int ret = OCF_LOCK_NOT_ACQUIRED;
+
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ return OCF_LOCK_ACQUIRED;
+ }
+
+ ocf_alock_waitlist_lock(alock, entry, flags);
+
+ /* At the moment list is protected, double check if the cache entry is
+ * unlocked
+ */
+ if (ocf_alock_trylock_entry_wr(alock, entry)) {
+ ret = OCF_LOCK_ACQUIRED;
+ goto unlock;
+ }
+
+ waiter = env_allocator_new(alock->allocator);
+ if (!waiter) {
+ ret = -OCF_ERR_NO_MEM;
+ goto unlock;
+ }
+
+	/* Set up the waiter's fields */
+ waiter->entry = entry;
+ waiter->req = (struct ocf_request *)io;
+ waiter->cmpl = prepare_wake_fn;
+ waiter->rw = OCF_WRITE;
+ INIT_LIST_HEAD(&waiter->item);
+
+ prepare_wait_fn(io);
+ /* Add to waiters list */
+ ocf_alock_waitlist_add(alock, entry, waiter);
+
+unlock:
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+
+ return ret;
+}
+
+static inline void xcache_unlock_wr_common(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ bool locked = false;
+ bool exchanged = true;
+
+ uint32_t idx = _WAITERS_LIST_ITEM(entry);
+ struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
+ struct ocf_alock_waiter *waiter;
+
+ struct list_head *iter, *next;
+
+ /*
+ * Lock exchange scenario
+ * 1. WR -> IDLE
+ * 2. WR -> RD
+ * 3. WR -> WR
+ */
+
+	/* Check if the requested entry is on the waiters list */
+ list_for_each_safe(iter, next, &lst->head) {
+ waiter = list_entry(iter, struct ocf_alock_waiter, item);
+
+ if (entry != waiter->entry)
+ continue;
+
+ if (exchanged) {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr2wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_wr2rd(alock, entry);
+ else
+ ENV_BUG();
+ } else {
+ if (waiter->rw == OCF_WRITE)
+ locked = ocf_alock_trylock_entry_wr(alock, entry);
+ else if (waiter->rw == OCF_READ)
+ locked = ocf_alock_trylock_entry_rd(alock, entry);
+ else
+ ENV_BUG();
+ }
+
+ if (locked) {
+ exchanged = false;
+ list_del(iter);
+
+ waiter->cmpl(waiter->req);
+
+ env_allocator_del(alock->allocator, waiter);
+ } else {
+ break;
+ }
+ }
+
+ if (exchanged) {
+ /* No exchange, no waiters on the list, unlock and return
+ * WR -> IDLE
+ */
+ ocf_alock_unlock_entry_wr(alock, entry);
+ }
+}
+
+void xcache_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry)
+{
+ unsigned long flags = 0;
+
+ OCF_DEBUG_CACHE(alock->cache, "Cache entry unlock one wr = %u", entry);
+
+ /* Lock waiters list */
+ ocf_alock_waitlist_lock(alock, entry, flags);
+ xcache_unlock_wr_common(alock, entry);
+ ocf_alock_waitlist_unlock(alock, entry, flags);
+}
diff --git a/src/utils/utils_alock.h b/src/utils/utils_alock.h
index 2d3df97..80188e3 100644
--- a/src/utils/utils_alock.h
+++ b/src/utils/utils_alock.h
@@ -87,4 +87,15 @@ void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry);
+typedef void (*ocf_io_lock_prepare_wait)(void *io);
+typedef void (*ocf_io_lock_prepare_wake)(void *io);
+int xcache_lock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry,
+ ocf_io_lock_prepare_wait prepare_wait_fn,
+ ocf_io_lock_prepare_wake prepare_wake_fn,
+ void *io);
+
+void xcache_unlock_wr(struct ocf_alock *alock,
+ const ocf_cache_line_t entry);
+
#endif
diff --git a/src/utils/utils_cache_line.c b/src/utils/utils_cache_line.c
index 281ff59..bc58054 100644
--- a/src/utils/utils_cache_line.c
+++ b/src/utils/utils_cache_line.c
@@ -119,8 +119,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
*/
env_atomic_dec(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
- ocf_lru_clean_cline(cache, part, line);
- ocf_purge_cleaning_policy(cache, line);
+ if (cache->xcache_ctx == NULL) {
+ ocf_lru_clean_cline(cache, part, line);
+ ocf_purge_cleaning_policy(cache, line);
+ }
}
}
diff --git a/src/xcache.c b/src/xcache.c
new file mode 100644
index 0000000..0c6a2b8
--- /dev/null
+++ b/src/xcache.c
@@ -0,0 +1,30 @@
+#include "xcache.h"
+#include "evicting/evicting_ops.h"
+#include "evicting/deadline.h"
+#include "utils/utils_cache_line.h"
+
+int xcache_init(ocf_cache_t cache)
+{
+ xcache_context_t *ctx = env_malloc(sizeof(xcache_context_t), 0);
+
+ if (ctx == NULL) {
+		return -OCF_ERR_NO_MEM;
+ }
+
+ ctx->cache = cache;
+ cache->xcache_ctx = ctx;
+
+	set_deadline_policy();
+	if (evicting_init(ctx) != 0) {
+		cache->xcache_ctx = NULL;
+		env_free(ctx);
+		return -OCF_ERR_NO_MEM;
+	}
+ ctx->line_size_shift = __builtin_ffsll(ocf_line_size(cache)) - 1;
+ xcache_qos_init(&ctx->qos);
+
+ return 0;
+}
+
+void xcache_fini(ocf_cache_t cache)
+{
+ evicting_fini(cache->xcache_ctx);
+ env_free(cache->xcache_ctx);
+ cache->xcache_ctx = NULL;
+}
diff --git a/src/xcache.h b/src/xcache.h
new file mode 100644
index 0000000..4fd7277
--- /dev/null
+++ b/src/xcache.h
@@ -0,0 +1,58 @@
+#ifndef XCACHE_H_
+#define XCACHE_H_
+
+#include <stdint.h>
+#include "ocf/ocf_types.h"
+#include "./ocf_cache_priv.h"
+
+#include "ocf/xcache.h"
+#include "xcache_cleaner.h"
+#include "qos/qos.h"
+
+typedef ocf_cache_line_t xcache_line_t;
+
+typedef struct xcache_context {
+ uint8_t line_size_shift;
+ void *xcache_evicting;
+ ocf_cache_t cache;
+ struct xcache_cleaning_ctx cleaning_ctx;
+ struct xcache_qos qos;
+} xcache_context_t;
+
+static inline xcache_context_t *xcache_get_ctx(ocf_cache_t cache)
+{
+ return cache->xcache_ctx;
+}
+
+static inline void *xcache_get_evicting(xcache_context_t *ctx)
+{
+ return ctx->xcache_evicting;
+}
+
+static inline void xcache_set_evicting(xcache_context_t *ctx, void *evicting)
+{
+ ctx->xcache_evicting = evicting;
+}
+
+static inline uint64_t xcache_addr_to_line(ocf_cache_t cache, uint64_t addr)
+{
+ xcache_context_t *ctx = cache->xcache_ctx;
+ return addr >> ctx->line_size_shift;
+}
+
+static inline uint64_t xcache_line_to_addr(ocf_cache_t cache, uint64_t line)
+{
+ xcache_context_t *ctx = cache->xcache_ctx;
+ return line << ctx->line_size_shift;
+}
+
+static inline struct xcache_cleaning_ctx *xcache_get_cleaning_ctx(ocf_cache_t cache)
+{
+ xcache_context_t *xcache_ctx = cache->xcache_ctx;
+ return &xcache_ctx->cleaning_ctx;
+}
+
+int xcache_init(ocf_cache_t cache);
+void xcache_fini(ocf_cache_t cache);
+
+#endif
diff --git a/src/xcache_cleaner.c b/src/xcache_cleaner.c
new file mode 100644
index 0000000..5de3369
--- /dev/null
+++ b/src/xcache_cleaner.c
@@ -0,0 +1,572 @@
+#include "metadata/metadata.h"
+#include "concurrency/ocf_concurrency.h"
+#include "utils/utils_cleaner.h"
+#include "utils/utils_cache_line.h"
+#include "ocf_queue_priv.h"
+#include "cleaning/cleaning.h"
+
+#include "ocf/xcache.h"
+
+#include "xcache_cleaner.h"
+#include "xcache.h"
+#include "engine/xcache_engine_common.h"
+#include "xcache_queue.h"
+#include "evicting/evicting_ops.h"
+#include "metadata/xcache_metadata.h"
+
+static inline uint8_t get_first_sector(ocf_cache_t cache, ocf_cache_line_t line, uint8_t start_sector, bool dirty)
+{
+ uint8_t sector;
+
+ for (sector = start_sector; sector < ocf_line_sectors(cache); sector++) {
+ if (metadata_test_dirty_one(cache, line, sector) == dirty) {
+ break;
+ }
+ }
+ return sector;
+}
+
+static inline uint8_t get_first_clean_sector(ocf_cache_t cache, ocf_cache_line_t line, uint8_t start_sector)
+{
+ return get_first_sector(cache, line, start_sector, false);
+}
+
+static inline uint8_t get_first_dirty_sector(ocf_cache_t cache, ocf_cache_line_t line, uint8_t start_sector)
+{
+ return get_first_sector(cache, line, start_sector, true);
+}
+
+static inline void get_dirty_sectors(ocf_cache_t cache, ocf_cache_line_t line,
+ uint8_t *start_sector, uint8_t *end_sector)
+{
+ bool dirty;
+
+ uint8_t sector = *start_sector;
+ uint8_t line_sectors = ocf_line_sectors(cache);
+
+ // fast path
+ if (*start_sector == 0 && metadata_test_dirty_all(cache, line)) {
+ *end_sector = line_sectors - 1;
+ return;
+ }
+
+ *start_sector = get_first_dirty_sector(cache, line, *start_sector);
+ if (*start_sector >= line_sectors) {
+ return;
+ }
+
+ *end_sector = get_first_clean_sector(cache, line, *start_sector + 1) - 1;
+}
+
+static inline void xcache_clean_unlock_line(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ struct clean_sort_data *data = cleaning_ctx->data;
+ struct ocf_alock *c = ocf_cache_line_concurrency(cleaning_ctx->cache);
+ int i;
+
+ for (i = 0; i < cleaning_ctx->count; i++) {
+ xcache_unlock_wr(c, data[i].line);
+ }
+}
+
+static inline void xcache_clean_cleanup(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ struct xcache_backdev_io *base_io = &cleaning_ctx->base_io;
+ ocf_cache_t cache = cleaning_ctx->cache;
+
+ if (base_io->data != NULL) {
+ ctx_data_free(cache->owner, base_io->data);
+ base_io->data = NULL;
+ }
+
+ xcache_cleaner_complete(cleaning_ctx);
+}
+
+static inline void xcache_clean_cb(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ if (cleaning_ctx->end) {
+ cleaning_ctx->end(cleaning_ctx->arg);
+ cleaning_ctx->end = NULL;
+ }
+}
+
+static void xcache_clean_end(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ xcache_clean_unlock_line(cleaning_ctx);
+ xcache_clean_cleanup(cleaning_ctx);
+ xcache_clean_cb(cleaning_ctx);
+}
+
+static void xcache_clean_update_metadata(ocf_cache_t cache, ocf_core_t core,
+ ocf_cache_line_t line)
+{
+ ocf_part_id_t part_id = PARTITION_DEFAULT;
+
+ ocf_metadata_start_collision_shared_access(cache, line);
+ metadata_clear_dirty(cache, line);
+
+ if (env_atomic_dec_and_test(&core->runtime_meta->
+ dirty_clines)) {
+ env_atomic64_set(&core->runtime_meta->
+ dirty_since, 0);
+ }
+
+ env_atomic_dec(&core->runtime_meta->
+ part_counters[part_id].dirty_clines);
+	ocf_metadata_end_collision_shared_access(cache, line);
+}
+
+static int clean_line_num(void *clean_io)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = (struct xcache_cleaning_ctx *)clean_io;
+
+ return cleaning_ctx->count;
+}
+
+static bool clean_flush_line(void *clean_io, int index, ocf_cache_line_t *line)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = (struct xcache_cleaning_ctx *)clean_io;
+
+ *line = cleaning_ctx->data[index].line;
+ return true;
+}
+
+static void xcache_clean_flush_metadata_end(void *clean_io, int error)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = (struct xcache_cleaning_ctx *)clean_io;
+
+ xcache_clean_end(cleaning_ctx);
+}
+
+static void xcache_clean_metadata_flush(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ struct xcache_io *io = &cleaning_ctx->io;
+ struct ocf_metadata_io_context io_ctx = {
+ .io = (void *)cleaning_ctx,
+ .io_flags = 0,
+ .io_end = xcache_clean_flush_metadata_end,
+ .queue = xcache_io_queue(io),
+ };
+ struct ocf_metadata_line_getter line_getter = {
+ .getter = (void *)cleaning_ctx,
+ .get_line_num = clean_line_num,
+ .get_flush_line = clean_flush_line,
+ };
+
+ xcache_metadata_flush_asynch(cleaning_ctx->cache, &io_ctx, &line_getter);
+}
+
+static void xcache_clean_bf_end(struct xcache_io *io, int error)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = container_of(io, struct xcache_cleaning_ctx,
+ io);
+ struct clean_sort_data *data = cleaning_ctx->data;
+ ocf_cache_t cache = cleaning_ctx->cache;
+ struct ocf_alock *c = ocf_cache_line_concurrency(cleaning_ctx->cache);
+
+ ocf_core_t core;
+ ocf_cache_line_t line;
+ int i, j;
+
+ if (error != 0) {
+ xcache_clean_end(cleaning_ctx);
+ return;
+ }
+
+ for (i = 0, j = 0; i < cleaning_ctx->count; i++) {
+ // core_line write failed
+ if (data[i].core_line == ULLONG_MAX) {
+ xcache_unlock_wr(c, data[i].line);
+ continue;
+ }
+
+ core = ocf_cache_get_core(cache, data[i].core_id);
+ line = data[i].line;
+ xcache_clean_update_metadata(cache, core, line);
+ // record line to flush
+ data[j++].line = data[i].line;
+ }
+ cleaning_ctx->count = j;
+
+ if (j == 0) {
+ xcache_clean_end(cleaning_ctx);
+ } else {
+ xcache_clean_metadata_flush(cleaning_ctx);
+ }
+}
+
+static void remove_failed_line(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ int l, r;
+
+ l = 0;
+ for (r = 0; r < cleaning_ctx->count; r++) {
+ if (cleaning_ctx->data[r].line == INVALID_LINE) {
+ continue;
+ }
+ if (l != r) {
+ cleaning_ctx->data[l] = cleaning_ctx->data[r];
+ }
+ l++;
+ }
+ cleaning_ctx->count = l;
+}
+
+static int xcache_clean_write_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = container_of(base_io,
+ struct xcache_cleaning_ctx, base_io);
+ struct clean_sort_data *data = cleaning_ctx->data;
+ ocf_cache_t cache = cleaning_ctx->cache;
+
+ uint64_t first_line, last_line;
+ int i;
+ ocf_core_t core;
+
+ if (cb_arg->error == 0) {
+ return 0;
+ }
+
+	// data[] was sorted by (core_id, core_line) in xcache_clean_sort()
+ first_line = xcache_addr_to_line(cache, cb_arg->addr);
+ last_line = xcache_addr_to_line(cache, cb_arg->addr + cb_arg->size - 1);
+ for (i = 0; i < cleaning_ctx->count; i++) {
+ if (data[i].core_line >= first_line) {
+ break;
+ }
+ }
+
+ for (; i < cleaning_ctx->count; i++) {
+ if (data[i].core_line > last_line) {
+ break;
+ }
+		// cache line alock is still held, don't touch data[i].line
+ data[i].core_line = ULLONG_MAX;
+ core = ocf_cache_get_core(cache, data[i].core_id);
+ ocf_core_stats_core_error_update(core, OCF_WRITE);
+ }
+
+ return 0;
+}
+
+struct dirty_range {
+ uint8_t start_sector;
+ uint8_t last_sector;
+ int idx;
+ uint64_t core_line;
+};
+
+struct clear_range {
+ uint8_t start_sector;
+ uint8_t last_sector;
+ int start_idx;
+ uint64_t start_line;
+ uint64_t last_line;
+};
+
+static void get_dirty_range(ocf_cache_t cache, struct xcache_cleaning_ctx *cleaning_ctx, struct dirty_range *range)
+{
+ struct clean_sort_data *data = cleaning_ctx->data;
+
+ for (; range->idx < cleaning_ctx->count; range->idx++) {
+ get_dirty_sectors(cache, data[range->idx].line, &range->start_sector, &range->last_sector);
+ if (range->start_sector < ocf_line_sectors(cache)) {
+ range->core_line = data[range->idx].core_line;
+ break;
+ }
+
+ range->start_sector = 0;
+ }
+}
+
+static void dirty_range_next(ocf_cache_t cache, struct dirty_range *range)
+{
+	/* Advance past the range we just consumed; wrap to the next line */
+	range->start_sector = range->last_sector + 1;
+	if (range->start_sector >= ocf_line_sectors(cache)) {
+		range->start_sector = 0;
+		range->idx++;
+	}
+}
+
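+/*
+ * A dirty range can extend the pending write only when it is physically
+ * contiguous with it on the core device: the current range must end at the
+ * last sector of its line, the new one must start at sector 0, and the
+ * core lines must be adjacent. Returns 0 on merge, -1 otherwise.
+ */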
+static int merge_dirty_range(ocf_cache_t cache, struct clear_range *clear_range, struct dirty_range *dirty_range)
+{
+ if (clear_range->last_sector != ocf_line_sectors(cache) - 1) {
+ return -1;
+ }
+
+ if (dirty_range->start_sector != 0) {
+ return -1;
+ }
+
+ if (dirty_range->core_line != clear_range->last_line + 1) {
+ return -1;
+ }
+
+ clear_range->last_line = dirty_range->core_line;
+ clear_range->last_sector = dirty_range->last_sector;
+ return 0;
+}
+
+static void submit_dirty_range(ocf_cache_t cache, struct xcache_cleaning_ctx *cleaning_ctx, struct clear_range *clear_range)
+{
+ struct xcache_backdev_io *base_io = &cleaning_ctx->base_io;
+ struct xcache_io *io = &cleaning_ctx->io;
+ uint64_t addr, size, buf_offset;
+
+	addr = xcache_line_to_addr(cache, clear_range->start_line) +
+			SECTORS_TO_BYTES(clear_range->start_sector);
+	size = xcache_line_to_addr(cache, clear_range->last_line - clear_range->start_line) +
+			SECTORS_TO_BYTES((int)clear_range->last_sector -
+			(int)clear_range->start_sector + 1);
+	buf_offset = xcache_line_to_addr(cache, clear_range->start_idx) +
+			SECTORS_TO_BYTES(clear_range->start_sector);
+ xcache_backdev_submit_io(base_io, false, addr, size, buf_offset, OCF_WRITE);
+}
+
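+/*
+ * Back-fill phase, entered once all cache-device reads finish: drop lines
+ * whose read failed, then coalesce contiguous dirty sector ranges and
+ * write them to the core device in as few IOs as possible.
+ */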
+static void xcache_clean_bf(struct xcache_io *io, int error)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = container_of(io,
+ struct xcache_cleaning_ctx, io);
+ struct xcache_backdev_io *base_io = &cleaning_ctx->base_io;
+ struct clean_sort_data *data = cleaning_ctx->data;
+ ocf_cache_t cache = cleaning_ctx->cache;
+
+ struct dirty_range dirty_range;
+ struct clear_range clear_range;
+
+ remove_failed_line(cleaning_ctx);
+
+	if (error) {
+		xcache_clean_bf_end(io, error);
+		return;
+	}
+
+ env_atomic_set(&io->remaining, 1);
+ io->end = xcache_clean_bf_end;
+ base_io->end = xcache_clean_write_cb;
+
+ dirty_range.start_sector = 0;
+ dirty_range.idx = 0;
+ get_dirty_range(cache, cleaning_ctx, &dirty_range);
+ while (dirty_range.idx < cleaning_ctx->count) {
+ clear_range.start_sector = dirty_range.start_sector;
+ clear_range.start_line = data[dirty_range.idx].core_line;
+ clear_range.start_idx = dirty_range.idx;
+ clear_range.last_sector = dirty_range.last_sector;
+ clear_range.last_line = data[dirty_range.idx].core_line;
+
+ for (dirty_range_next(cache, &dirty_range);
+ dirty_range.idx < cleaning_ctx->count;
+ dirty_range_next(cache, &dirty_range)) {
+ get_dirty_range(cache, cleaning_ctx, &dirty_range);
+ if (merge_dirty_range(cache, &clear_range, &dirty_range) != 0) {
+ break;
+ }
+ }
+ submit_dirty_range(cache, cleaning_ctx, &clear_range);
+ }
+
+ xcache_io_put(io);
+}
+
+static int xcache_clean_read_cb(struct xcache_backdev_io *base_io, struct backdev_io_end_arg *cb_arg)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = container_of(base_io,
+ struct xcache_cleaning_ctx, base_io);
+ struct ocf_alock *c = ocf_cache_line_concurrency(cleaning_ctx->cache);
+
+ ocf_cache_line_t line;
+ ocf_core_t core;
+ int i;
+
+ if (cb_arg->error == 0) {
+ return 0;
+ }
+
+ line = addr_to_cache_line(cleaning_ctx->cache, cb_arg->addr);
+ for (i = 0; i < cleaning_ctx->count; i++) {
+ if (cleaning_ctx->data[i].line == line) {
+ xcache_unlock_wr(c, line);
+ cleaning_ctx->data[i].line = INVALID_LINE;
+ core = ocf_cache_get_core(cleaning_ctx->cache, cleaning_ctx->data[i].core_id);
+ ocf_core_stats_cache_error_update(core, OCF_READ);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int xcache_clean_if(struct xcache_io *io)
+{
+ struct xcache_cleaning_ctx *cleaning_ctx = container_of(io,
+ struct xcache_cleaning_ctx, io);
+ struct xcache_backdev_io *base_io = &cleaning_ctx->base_io;
+ ocf_cache_t cache = cleaning_ctx->cache;
+ uint64_t line_size = ocf_line_size(cache);
+ uint64_t buf_offset = 0;
+
+ ocf_cache_line_t line;
+ uint64_t addr;
+ int i;
+
+ io->end = xcache_clean_bf;
+ base_io->end = xcache_clean_read_cb;
+ for (i = 0; i < cleaning_ctx->count; i++) {
+ line = cleaning_ctx->data[i].line;
+ addr = cache_line_to_addr(cache, line, 0);
+ xcache_backdev_submit_io(base_io, true, addr, line_size, buf_offset, OCF_READ);
+ buf_offset += line_size;
+ }
+ return 0;
+}
+
+static inline int sort_data_cmp(struct clean_sort_data *l, struct clean_sort_data *r)
+{
+ if (l->core_id != r->core_id) {
+ return l->core_id - r->core_id;
+ }
+ return l->core_line - r->core_line;
+}
+
+static inline void swap_sort_data(struct clean_sort_data *l, struct clean_sort_data *r)
+{
+ struct clean_sort_data tmp;
+
+ tmp = *l;
+ *l = *r;
+ *r = tmp;
+}
+
+static void clean_quick_sort(struct clean_sort_data *data, int start, int end)
+{
+ int i, j;
+
+ if (start >= end) {
+ return;
+ }
+
+ j = start;
+ for (i = start; i < end; i++) {
+ if (sort_data_cmp(&data[i], &data[end]) < 0) {
+ if (i != j) {
+ swap_sort_data(&data[i], &data[j]);
+ }
+ j++;
+ }
+ }
+ if (j != end) {
+ swap_sort_data(&data[j], &data[end]);
+ }
+
+ clean_quick_sort(data, start, j - 1);
+ clean_quick_sort(data, j + 1, end);
+}
+
+static int xcache_clean_sort(ocf_cache_t cache, struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ struct clean_sort_data *data = cleaning_ctx->data;
+ int count = cleaning_ctx->count;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ ocf_metadata_get_core_info(cache, data[i].line, &data[i].core_id,
+ &data[i].core_line);
+ }
+ clean_quick_sort(data, 0, count - 1);
+
+ return 0;
+}
+
+/*
+ * 1. spdk_io_to_* helpers cannot be used here
+ * 2. xcache_cleaner_prepare() must have been called beforehand
+ */
+int xcache_cleaner_clean(ocf_cache_t cache, ocf_queue_t q,
+ struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ struct xcache_io *io = &cleaning_ctx->io;
+ struct xcache_backdev_io *base_io = &cleaning_ctx->base_io;
+
+ cleaning_ctx->cache = cache;
+
+ if (cleaning_ctx->count == 0) {
+ goto err_out;
+ }
+
+ if (xcache_clean_sort(cache, cleaning_ctx) != 0) {
+ goto err_out;
+ }
+
+ base_io->data = ctx_data_alloc(cache->owner, ocf_line_pages(cache) * cleaning_ctx->count);
+ if (base_io->data == NULL) {
+ goto err_out;
+ }
+
+ base_io->xcache_io = io;
+ base_io->end = NULL;
+
+ io->io_if = xcache_clean_if;
+ io->io_queue = q;
+ io->error = 0;
+ env_atomic_set(&io->remaining, 1);
+ xcache_queue_push_xcache_io_back(io, true);
+ return 0;
+
+err_out:
+ xcache_clean_end(cleaning_ctx);
+ return -1;
+}
+
+#define CLEAN_INTERVAL 600000 // 600 seconds
+
+static void xcache_cleaner_run_complete(void *arg)
+{
+ ocf_cleaner_t cleaner = (ocf_cleaner_t)arg;
+
+ ocf_cleaner_run_complete(cleaner, CLEAN_INTERVAL);
+}
+
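+/*
+ * Periodic cleaner entry point: under the shared metadata lock, ask the
+ * evicting policy for up to XCACHE_CLEAN_SIZE dirty lines and hand them
+ * to xcache_cleaner_clean(); the next run is scheduled through
+ * ocf_cleaner_run_complete() once the batch finishes, or immediately when
+ * there is nothing to clean.
+ */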
+void xcache_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
+{
+ ocf_cache_t cache;
+ struct xcache_cleaning_ctx *cleaning_ctx;
+ xcache_context_t *xcache_ctx;
+ ocf_cache_line_t cline[XCACHE_CLEAN_SIZE];
+ int cline_num;
+ unsigned lock_idx;
+
+ if (cleaner == NULL || queue == NULL) {
+ return;
+ }
+
+ cache = ocf_cleaner_get_cache(cleaner);
+ cleaning_ctx = xcache_get_cleaning_ctx(cache);
+ xcache_ctx = xcache_get_ctx(cache);
+
+ if (xcache_cleaner_prepare(cleaning_ctx) != 0) {
+ return;
+ }
+
+ if (ocf_cleaner_run_prepare(cleaner, queue) != 0) {
+ xcache_cleaner_complete(cleaning_ctx);
+ return;
+ }
+
+ lock_idx = ocf_metadata_concurrency_next_idx(queue);
+ ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);
+
+ cline_num = evicting_line_to_clean(xcache_ctx, cline, XCACHE_CLEAN_SIZE);
+
+ ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);
+
+ if (cline_num == 0) {
+ ocf_cleaner_run_complete(cleaner, CLEAN_INTERVAL);
+ xcache_cleaner_complete(cleaning_ctx);
+ return;
+ }
+
+ xcache_cleaner_set_end(cleaning_ctx, xcache_cleaner_run_complete, (void *)cleaner);
+ xcache_cleaner_fill(cleaning_ctx, cline, cline_num);
+ xcache_cleaner_clean(cache, queue, cleaning_ctx);
+}
diff --git a/src/xcache_cleaner.h b/src/xcache_cleaner.h
new file mode 100644
index 0000000..d7c9e4d
--- /dev/null
+++ b/src/xcache_cleaner.h
@@ -0,0 +1,69 @@
+#ifndef XCACHE_CLEANER_H_
+#define XCACHE_CLEANER_H_
+
+#include "ocf/ocf_io.h"
+#include "ocf/ocf_types.h"
+#include "utils/utils_refcnt.h"
+
+#include "ocf/xcache.h"
+
+#define XCACHE_CLEAN_SIZE 32
+
+struct clean_sort_data {
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+ ocf_cache_line_t line;
+};
+
+typedef void (*cleaning_end_t)(void *arg);
+struct xcache_cleaning_ctx {
+ ocf_cache_t cache;
+ struct ocf_refcnt counter;
+ struct xcache_io io;
+ struct xcache_backdev_io base_io;
+ struct clean_sort_data data[XCACHE_CLEAN_SIZE];
+ int count;
+ cleaning_end_t end;
+ void *arg;
+};
+
+int xcache_cleaner_clean(ocf_cache_t cache, ocf_queue_t q,
+ struct xcache_cleaning_ctx *cleaning_ctx);
+
+static inline int xcache_cleaner_prepare(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ if (ocf_refcnt_inc(&cleaning_ctx->counter) == 1) {
+ return 0;
+ }
+
+ ocf_refcnt_dec(&cleaning_ctx->counter);
+ return -1;
+}
+
+static inline void xcache_cleaner_complete(struct xcache_cleaning_ctx *cleaning_ctx)
+{
+ ocf_refcnt_dec(&cleaning_ctx->counter);
+}
+
+static inline int xcache_cleaner_fill(struct xcache_cleaning_ctx *cleaning_ctx,
+ ocf_cache_line_t *clines, int line_num)
+{
+ struct clean_sort_data *data = cleaning_ctx->data;
+ int num = line_num < XCACHE_CLEAN_SIZE ? line_num : XCACHE_CLEAN_SIZE;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ data[i].line = clines[i];
+ }
+ cleaning_ctx->count = num;
+ return num;
+}
+
+static inline void xcache_cleaner_set_end(struct xcache_cleaning_ctx *cleaning_ctx,
+ cleaning_end_t end, void *arg)
+{
+ cleaning_ctx->end = end;
+ cleaning_ctx->arg = arg;
+}
+
+#endif /* XCACHE_CLEANER_H_ */
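xcache_cleaner_prepare()/xcache_cleaner_complete() form a try-lock on top of ocf_refcnt: only the caller that raises the counter from zero to one may run a pass; everyone else backs off instead of queuing. A minimal sketch of the intended call pattern (cleaner_tick is a hypothetical caller):

    static void cleaner_tick(struct xcache_cleaning_ctx *ctx)
    {
        /* fails unless we are the only holder of the gate */
        if (xcache_cleaner_prepare(ctx) != 0)
            return;

        /* ... select dirty lines and submit the cleaning I/O ... */

        xcache_cleaner_complete(ctx); /* reopen the gate */
    }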
diff --git a/src/xcache_lru.c b/src/xcache_lru.c
new file mode 100644
index 0000000..32f9605
--- /dev/null
+++ b/src/xcache_lru.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright(c) 2012-2021 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_cache_priv.h"
+#include "ocf_lru.h"
+#include "metadata/metadata.h"
+#include "utils/utils_alock.h"
+#include "concurrency/ocf_cache_line_concurrency.h"
+
+#include "xcache_cleaner.h"
+#include "engine/xcache_engine_common.h"
+#include "xcache_lru.h"
+#include "evicting/evicting_ops.h"
+
+/*
+ * The io context already holds the bucket lock for ctx->hash, so the
+ * victim's bucket is locked only when it hashes elsewhere, and only via
+ * trylock so that two ios can never deadlock on each other's buckets.
+ */
+static inline bool xcache_trylock_hash(struct xcache_io_context *ctx,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	if (ocf_metadata_hash_func(xcache_ctx_cache(ctx), core_line, core_id) ==
+			ctx->hash)
+		return true;
+
+	return ocf_hb_cline_naked_trylock_wr(
+			&xcache_ctx_cache(ctx)->metadata.lock,
+			core_id, core_line);
+}
+
+static inline void xcache_unlock_hash(struct xcache_io_context *ctx,
+ ocf_core_id_t core_id, uint64_t core_line)
+{
+ if (ocf_metadata_hash_func(xcache_ctx_cache(ctx), core_line, core_id) !=
+ ctx->hash) {
+ ocf_hb_cline_naked_unlock_wr(
+ &xcache_ctx_cache(ctx)->metadata.lock,
+ core_id, core_line);
+	}
+}
+
+static inline bool xcache_eviction_lock(struct xcache_io_context *ctx,
+ ocf_cache_line_t cache_line,
+		ocf_core_id_t *core_id, uint64_t *core_line)
+{
+ struct ocf_alock *lock = ocf_cache_line_concurrency(xcache_ctx_cache(ctx));
+
+ ocf_metadata_get_core_info(xcache_ctx_cache(ctx), cache_line,
+ core_id, core_line);
+
+ if (*core_id == ocf_core_get_id(xcache_ctx_core(ctx)) &&
+ *core_line >= ctx->core_line_first &&
+ *core_line <= ctx->core_line_last) {
+ xcache_unlock_wr(lock, cache_line);
+ return false;
+ }
+
+ if (!xcache_trylock_hash(ctx, *core_id, *core_line)) {
+ xcache_unlock_wr(lock, cache_line);
+ return false;
+ }
+
+ return true;
+}
+
+#define EVICT_RETRY_LOCK 3
+static inline ocf_cache_line_t xcache_evict_line(struct xcache_io_context *io_ctx)
+{
+ ocf_cache_t cache = xcache_ctx_cache(io_ctx);
+ xcache_context_t *ctx = xcache_get_ctx(cache);
+ ocf_core_id_t core_id;
+ uint64_t core_line;
+ ocf_cache_line_t cline;
+ int cline_num;
+ int i;
+
+ for (i = 0; i < EVICT_RETRY_LOCK; i++) {
+ cline_num = evicting_line_to_evict(ctx, &cline, 1);
+ if (cline_num == 0) {
+ break;
+ }
+ if (xcache_eviction_lock(io_ctx,
+ cline, &core_id, &core_line)) {
+ goto found;
+ }
+ }
+ return INVALID_LINE;
+
+found:
+ ocf_lru_invalidate(cache, cline, core_id, io_ctx->part_id);
+ xcache_unlock_hash(io_ctx, core_id, core_line);
+
+ xcache_map_cache_line(io_ctx, cline);
+ return cline;
+}
+
+static inline ocf_cache_line_t xcache_free_list_line(ocf_cache_t cache, uint32_t list_idx,
+ struct ocf_part *dst_part)
+{
+ struct ocf_part *free = &cache->free;
+ struct ocf_alock *lock = ocf_cache_line_concurrency(cache);
+ struct ocf_lru_list *list;
+ ocf_cache_line_t cline;
+
+ ocf_metadata_lru_wr_lock(&cache->metadata.lock, list_idx);
+
+ list = ocf_lru_get_list(free, list_idx, true);
+ for (cline = list->tail; cline != INVALID_LINE;
+ cline = ocf_metadata_get_lru(cache, cline)->prev) {
+ if (ocf_alock_trylock_entry_wr(lock, cline)) {
+ break;
+ }
+ }
+
+ if (cline != INVALID_LINE) {
+ ocf_lru_repart_locked(cache, cline, free, dst_part);
+ }
+
+	ocf_metadata_lru_wr_unlock(&cache->metadata.lock, list_idx);
+
+ return cline;
+}
+
+static ocf_cache_line_t xcache_get_free_line(struct xcache_io_context *ctx)
+{
+ struct ocf_part *dst_part;
+ uint32_t start_idx, iter_idx;
+ ocf_cache_line_t cline;
+
+ dst_part = &xcache_ctx_cache(ctx)->user_parts[ctx->part_id].part;
+
+	/* circular scan over all LRU lists, starting at a per-queue
+	 * rotating index to spread contention between queues */
+	start_idx = xcache_ctx_queue(ctx)->lru_idx++ % OCF_NUM_LRU_LISTS;
+ for (iter_idx = start_idx; iter_idx < OCF_NUM_LRU_LISTS; iter_idx++) {
+ cline = xcache_free_list_line(xcache_ctx_cache(ctx), iter_idx, dst_part);
+ if (cline != INVALID_LINE) {
+ goto found;
+ }
+ }
+
+ for (iter_idx = 0; iter_idx < start_idx; iter_idx++) {
+ cline = xcache_free_list_line(xcache_ctx_cache(ctx), iter_idx, dst_part);
+ if (cline != INVALID_LINE) {
+ goto found;
+ }
+ }
+
+ return INVALID_LINE;
+
+found:
+ ENV_BUG_ON(metadata_test_dirty(xcache_ctx_cache(ctx), cline));
+ xcache_map_cache_line(ctx, cline);
+ return cline;
+}
+
+ocf_cache_line_t xcache_get_cline(struct xcache_io_context *ctx)
+{
+ ocf_cache_t cache = xcache_ctx_cache(ctx);
+ ocf_cache_line_t line;
+
+ if (ocf_lru_num_free(cache) > 0) {
+ line = xcache_get_free_line(ctx);
+ if (line != INVALID_LINE) {
+ return line;
+ }
+ }
+
+ return xcache_evict_line(ctx);
+}
+
+void xcache_clean(struct xcache_io_context *io_ctx, uint32_t count)
+{
+ ocf_cache_t cache = xcache_ctx_cache(io_ctx);
+ struct xcache_cleaning_ctx *cleaning_ctx = xcache_get_cleaning_ctx(cache);
+ xcache_context_t *ctx = xcache_get_ctx(cache);
+ ocf_cache_line_t cline[XCACHE_CLEAN_SIZE];
+ int cline_num;
+	unsigned lock_idx;
+
+	if (ocf_mngt_cache_is_locked(cache)) {
+		return;
+	}
+
+ if (xcache_cleaner_prepare(cleaning_ctx) != 0) {
+ return;
+ }
+
+ lock_idx = ocf_metadata_concurrency_next_idx(xcache_ctx_queue(io_ctx));
+ ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);
+
+ count = count < XCACHE_CLEAN_SIZE ? count : XCACHE_CLEAN_SIZE;
+ cline_num = evicting_line_to_clean(ctx, cline, count);
+ xcache_cleaner_fill(cleaning_ctx, cline, cline_num);
+
+	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);
+
+	if (cline_num == 0) {
+		xcache_cleaner_complete(cleaning_ctx);
+		return;
+	}
+
+	xcache_cleaner_clean(cache, xcache_ctx_queue(io_ctx), cleaning_ctx);
+}
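xcache_get_cline() prefers a free line and falls back to eviction; when both fail, the caller is expected to kick cleaning and retry later. A sketch of that caller-side pattern (map_one_line is a hypothetical name, not part of this patch):

    static int map_one_line(struct xcache_io_context *ctx)
    {
        ocf_cache_line_t line = xcache_get_cline(ctx);

        if (line == INVALID_LINE) {
            /* no free or evictable line: make some lines clean */
            xcache_clean(ctx, XCACHE_CLEAN_SIZE);
            return -1;
        }

        /* line is locked, repartitioned and mapped into the io context */
        return 0;
    }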
diff --git a/src/xcache_lru.h b/src/xcache_lru.h
new file mode 100644
index 0000000..87e592e
--- /dev/null
+++ b/src/xcache_lru.h
@@ -0,0 +1,9 @@
+#ifndef __XCACHE_LRU_H__
+#define __XCACHE_LRU_H__
+
+#include "ocf/xcache.h"
+
+ocf_cache_line_t xcache_get_cline(struct xcache_io_context *ctx);
+void xcache_clean(struct xcache_io_context *ctx, uint32_t count);
+
+#endif /* __XCACHE_LRU_H__ */
diff --git a/src/xcache_ocf_core.c b/src/xcache_ocf_core.c
new file mode 100644
index 0000000..bfd6619
--- /dev/null
+++ b/src/xcache_ocf_core.c
@@ -0,0 +1,45 @@
+#include "ocf/ocf.h"
+#include "ocf_cache_priv.h"
+#include "metadata/metadata.h"
+#include "engine/xcache_engine.h"
+#include "engine/xcache_engine_common.h"
+
+#include "ocf/xcache.h"
+#include "xcache_queue.h"
+#include "qos/qos.h"
+
+void xcache_submit_io(struct xcache_io *io)
+{
+	ocf_cache_t cache;
+
+	OCF_CHECK_NULL(io);
+
+	cache = xcache_io_cache(io);
+
+	if (unlikely(!env_bit_test(ocf_cache_state_running,
+			&cache->cache_state))) {
+		xcache_io_end(io, -OCF_ERR_CACHE_NOT_AVAIL);
+		return;
+	}
+
+ xcache_get_io_if(io, cache->conf_meta->cache_mode);
+ if (io->io_if == NULL) {
+ xcache_io_end(io, -OCF_ERR_INVAL);
+ return;
+ }
+
+ env_atomic_set(&io->remaining, 1);
+ xcache_queue_push_xcache_io_back(io, true);
+}
+
+void xcache_backdev_io_end(struct xcache_backdev_io *bd_io, struct backdev_io_end_arg *arg)
+{
+ struct xcache_io *io = bd_io->xcache_io;
+ xcache_context_t *xcache_ctx = backdev_io_to_xcache_ctx(bd_io);
+
+ xcache_qos_load_sub(&xcache_ctx->qos, bd_io->dev, bd_io->dir, arg->size);
+
+ io->error |= arg->error;
+ bd_io->end(bd_io, arg);
+ xcache_io_put(io);
+}
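xcache_backdev_io_end() is the single funnel for backing-device completions: it settles QoS accounting, accumulates the error, runs the per-io callback, and drops the io reference. A sketch of a completion handler feeding it, assuming backdev_io_end_arg's remaining fields may be zero-initialized (backdev_complete is illustrative):

    static void backdev_complete(struct xcache_backdev_io *bd_io, int error,
            uint64_t bytes)
    {
        struct backdev_io_end_arg arg = {
            .error = error,
            .size = bytes,
        };

        xcache_backdev_io_end(bd_io, &arg);
    }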
diff --git a/src/xcache_queue.c b/src/xcache_queue.c
new file mode 100644
index 0000000..01e0445
--- /dev/null
+++ b/src/xcache_queue.c
@@ -0,0 +1,337 @@
+#include "ocf/ocf.h"
+#include "ocf_queue_priv.h"
+#include "ocf_priv.h"
+#include "ocf_cache_priv.h"
+
+#include "ocf/xcache.h"
+#include "xcache_queue.h"
+#include "engine/xcache_engine.h"
+#include "engine/xcache_engine_common.h"
+
+#define ENTRY_MASK ((uintptr_t)0x7)
+
+int xcache_queue_ctx_init(ocf_queue_t queue)
+{
+	struct xcache_queue_ctx *queue_ctx =
+			env_malloc(sizeof(*queue_ctx), ENV_MEM_NORMAL);
+
+	if (queue_ctx == NULL) {
+		return -OCF_ERR_NO_MEM;
+	}
+
+ INIT_LIST_HEAD(&queue_ctx->backdev_io_list);
+ queue_ctx->backdev_io_no = 0;
+
+ INIT_LIST_HEAD(&queue_ctx->flush_io_list);
+ queue_ctx->flush_io_no = 0;
+
+ INIT_LIST_HEAD(&queue_ctx->xcache_io_list);
+ queue_ctx->xcache_io_no = 0;
+
+ queue_ctx->bf_data_num = 0;
+ queue->priv1 = (void *)queue_ctx;
+ return 0;
+}
+
+void xcache_queue_ctx_deinit(ocf_queue_t queue)
+{
+ env_free(queue->priv1);
+ queue->priv1 = NULL;
+}
+
+/*
+ * struct ocf_request carries no type field, so OCF requests are tagged by
+ * setting the low bit of their list.next pointer; native xcache entries
+ * store the type directly in the queue_entry.
+ */
+static inline void set_entry_type(struct queue_entry *entry,
+		enum entry_type type)
+{
+	if (type == OCF_REQ_ENTRY) {
+		entry->list.next = (struct list_head *)
+				(((uintptr_t)entry->list.next) | (uintptr_t)1);
+		return;
+	}
+
+	entry->type = type;
+}
+
+static inline enum entry_type get_entry_type(struct queue_entry *entry)
+{
+ if (((uintptr_t)entry->list.next & ENTRY_MASK) != 0) {
+ return OCF_REQ_ENTRY;
+ }
+
+ return entry->type;
+}
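The tagging relies on alignment: a struct list_head is pointer-aligned, so on a 64-bit ABI the low three bits of a list pointer are always zero and free to carry metadata. A self-contained illustration of the invariant, independent of OCF:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* assumes a 64-bit ABI where the object is 8-byte aligned */
        struct node { struct node *next; } n = { &n };
        uintptr_t tagged = (uintptr_t)n.next | (uintptr_t)1; /* tag: OCF request */

        assert((tagged & 0x7) == 1);                             /* type recovered */
        assert((struct node *)(tagged & ~(uintptr_t)0x7) == &n); /* pointer recovered */
        return 0;
    }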
+
+static inline void list_add_entry(struct list_head *list, struct list_head *head)
+{
+	/* the list head's own next pointer is never tagged, so a plain
+	 * list_add() is safe when inserting at the front */
+	list_add(list, head);
+}
+
+static inline void list_add_tail_entry(struct list_head *list, struct list_head *head)
+{
+ struct list_head *prev = head->prev;
+ uintptr_t bits = (uintptr_t)prev->next & ENTRY_MASK;
+
+ list->next = head;
+ list->prev = prev;
+
+ prev->next = (struct list_head *)((uintptr_t)list | bits);
+ head->prev = list;
+}
+
+/* only valid for the front entry: its prev is the untagged list head,
+ * while its own next pointer may still carry type bits */
+static inline void list_pop_front_entry(struct queue_entry *entry)
+{
+	struct list_head *next = (struct list_head *)
+			((uintptr_t)entry->list.next & ~ENTRY_MASK);
+
+	entry->list.prev->next = next;
+	next->prev = entry->list.prev;
+}
+
+/*
+ * The casts below rely on struct xcache_io and struct xcache_backdev_io
+ * beginning with a layout compatible with struct queue_entry.
+ */
+static inline struct queue_entry *xcache_io_to_entry(struct xcache_io *io)
+{
+	return (struct queue_entry *)io;
+}
+
+static inline struct xcache_io *entry_to_xcache_io(struct queue_entry *entry)
+{
+ return (struct xcache_io *)entry;
+}
+
+static inline struct queue_entry *backdev_io_to_entry(struct xcache_backdev_io *base_io)
+{
+ return (struct queue_entry *)base_io;
+}
+
+static inline struct xcache_backdev_io *entry_to_backdev_io(struct queue_entry *entry)
+{
+ return (struct xcache_backdev_io *)entry;
+}
+
+/* struct ocf_request is overlaid onto a queue entry through its embedded
+ * list head; container_of() converts between the two views */
+static inline struct queue_entry *ocf_req_to_entry(struct ocf_request *req)
+{
+	return container_of(&req->list, struct queue_entry, list);
+}
+
+static inline struct ocf_request *entry_to_ocf_req(struct queue_entry *entry)
+{
+	return container_of(&entry->list, struct ocf_request, list);
+}
+
+static void xcache_io_run(struct queue_entry *entry)
+{
+ struct xcache_io *io = entry_to_xcache_io(entry);
+
+ io->io_if(io);
+
+ xcache_io_put(io);
+}
+
+static void backdev_io_run(struct queue_entry *entry)
+{
+ struct xcache_backdev_io *base_io = entry_to_backdev_io(entry);
+ struct xcache_io *io = base_io->xcache_io;
+
+ base_io->io_res(base_io);
+
+ xcache_io_put(io);
+}
+
+static void ocf_req_run(struct queue_entry *entry)
+{
+ struct ocf_request *req = entry_to_ocf_req(entry);
+
+ if (req->ioi.io.handle)
+ req->ioi.io.handle(&req->ioi.io, req);
+ else
+ ocf_io_handle(&req->ioi.io, req);
+}
+
+static struct queue_entry_ops {
+ void (*entry_run)(struct queue_entry *entry);
+} queue_entry_ops[] = {
+ [XCACHE_IO_ENTRY] = {
+ .entry_run = xcache_io_run,
+ },
+ [XCACHE_BACKDEV_IO_ENTRY] = {
+ .entry_run = backdev_io_run,
+ },
+ [OCF_REQ_ENTRY] = {
+ .entry_run = ocf_req_run,
+ },
+};
+
+static inline void queue_entry_run(enum entry_type type, struct queue_entry *entry)
+{
+ queue_entry_ops[type].entry_run(entry);
+}
+
+static struct queue_entry *xcache_queue_pop_entry(ocf_queue_t q)
+{
+ unsigned long lock_flags = 0;
+ struct queue_entry *entry;
+
+ OCF_CHECK_NULL(q);
+
+ env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+
+ if (list_empty(&q->io_list)) {
+ /* No items on the list */
+ env_spinlock_unlock_irqrestore(&q->io_list_lock,
+ lock_flags);
+ return NULL;
+ }
+
+ /* Get the first request and remove it from the list */
+ entry = list_first_entry(&q->io_list, struct queue_entry, list);
+
+ env_atomic_dec(&q->io_no);
+ list_pop_front_entry(entry);
+
+ env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+
+ OCF_CHECK_NULL(entry);
+
+ return entry;
+}
+
+// overrides ocf_queue_run_single
+void ocf_queue_run_single(ocf_queue_t q)
+{
+ struct queue_entry *entry = NULL;
+
+ OCF_CHECK_NULL(q);
+
+ entry = xcache_queue_pop_entry(q);
+
+ if (!entry)
+ return;
+
+ queue_entry_run(get_entry_type(entry), entry);
+}
+
+// called only from the queue's own context, so no locking is needed
+struct xcache_backdev_io *xcache_queue_alloc_backdev_io(ocf_queue_t q)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+ struct xcache_backdev_io *io;
+
+ if (queue_ctx->backdev_io_no == 0) {
+		return env_malloc(sizeof(struct xcache_backdev_io), ENV_MEM_NORMAL);
+ }
+
+ io = list_first_entry(&queue_ctx->backdev_io_list, struct xcache_backdev_io, free_list);
+ list_del(&io->free_list);
+ queue_ctx->backdev_io_no--;
+ return io;
+}
+
+void xcache_queue_free_backdev_io(ocf_queue_t q, struct xcache_backdev_io *io)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->backdev_io_no > QUEUE_CACHE_SIZE) {
+ env_free(io);
+ return;
+ }
+
+ list_add_tail(&io->free_list, &queue_ctx->backdev_io_list);
+ queue_ctx->backdev_io_no++;
+}
+
+struct xcache_io *xcache_queue_alloc_xcache_io(ocf_queue_t q)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+ struct xcache_io *io;
+
+ if (queue_ctx->xcache_io_no == 0) {
+		return env_malloc(sizeof(struct xcache_io), ENV_MEM_NORMAL);
+ }
+
+ io = list_first_entry(&queue_ctx->xcache_io_list, struct xcache_io, queue_list);
+ list_del(&io->queue_list);
+ queue_ctx->xcache_io_no--;
+ return io;
+}
+
+void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->xcache_io_no > QUEUE_CACHE_SIZE) {
+ env_free(io);
+ return;
+ }
+
+ list_add_tail(&io->queue_list, &queue_ctx->xcache_io_list);
+ queue_ctx->xcache_io_no++;
+}
+
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num > 0) {
+ return queue_ctx->bf_data[--queue_ctx->bf_data_num];
+ } else {
+ return ctx_data_alloc(cache->owner, (ocf_line_size(cache) + PAGE_SIZE - 1) / PAGE_SIZE);
+ }
+}
+
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data)
+{
+ struct xcache_queue_ctx *queue_ctx = xcache_get_queue_ctx(q);
+
+ if (queue_ctx->bf_data_num < QUEUE_CACHE_SIZE) {
+ queue_ctx->bf_data[queue_ctx->bf_data_num++] = data;
+ } else {
+ ctx_data_free(cache->owner, data);
+ }
+}
+
+static void xcache_queue_push_entry(ocf_queue_t q, struct queue_entry *entry,
+		bool at_head, bool allow_sync, enum entry_type type)
+{
+ ocf_cache_t cache = ocf_queue_get_cache(q);
+ unsigned long lock_flags = 0;
+
+ INIT_LIST_HEAD(&entry->list);
+
+ env_atomic_set(&cache->last_access_ms,
+ env_ticks_to_msecs(env_get_tick_count()));
+
+ env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
+
+ if (at_head) {
+ list_add_entry(&entry->list, &q->io_list);
+ } else {
+ list_add_tail_entry(&entry->list, &q->io_list);
+ }
+	// the entry type must be set while holding the list lock
+ set_entry_type(entry, type);
+ env_atomic_inc(&q->io_no);
+
+ env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
+
+ ocf_queue_kick(q, allow_sync);
+}
+
+void xcache_queue_push_xcache_io_back(struct xcache_io *io, bool allow_sync)
+{
+	struct queue_entry *entry = xcache_io_to_entry(io);
+
+	xcache_queue_push_entry(xcache_io_queue(io), entry, false, allow_sync,
+			XCACHE_IO_ENTRY);
+}
+
+void xcache_queue_push_backdev_io_front(struct xcache_backdev_io *base_io,
+		bool allow_sync)
+{
+	struct queue_entry *entry = backdev_io_to_entry(base_io);
+
+	xcache_queue_push_entry(xcache_io_queue(base_io->xcache_io), entry,
+			true, allow_sync, XCACHE_BACKDEV_IO_ENTRY);
+}
+
+// overrides ocf_engine_push_req_front
+void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
+{
+ struct queue_entry *entry = ocf_req_to_entry(req);
+ xcache_queue_push_entry(req->io_queue, entry, true, allow_sync, OCF_REQ_ENTRY);
+}
+
+// overrides ocf_engine_push_req_back
+void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
+{
+ struct queue_entry *entry = ocf_req_to_entry(req);
+ xcache_queue_push_entry(req->io_queue, entry, false, allow_sync, OCF_REQ_ENTRY);
+}
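The push entry points above replace OCF's defaults at link time through the usual weak/strong symbol rules. A minimal sketch of the mechanism (the weak body shown is illustrative, not OCF's actual implementation):

    /* in the base library: a weak default that any strong definition,
     * such as the ones in this file, overrides at link time */
    __attribute__((weak))
    void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
    {
        /* default queueing path used when no override is linked in */
    }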
diff --git a/src/xcache_queue.h b/src/xcache_queue.h
new file mode 100644
index 0000000..9a9fd23
--- /dev/null
+++ b/src/xcache_queue.h
@@ -0,0 +1,35 @@
+#ifndef XCACHE_QUEUE_H_
+#define XCACHE_QUEUE_H_
+
+#include "ocf/xcache.h"
+
+#define QUEUE_CACHE_SIZE 128
+
+struct xcache_queue_ctx {
+ struct list_head backdev_io_list;
+ int backdev_io_no;
+ struct list_head flush_io_list;
+ int flush_io_no;
+ struct list_head xcache_io_list;
+ int xcache_io_no;
+ void *bf_data[QUEUE_CACHE_SIZE];
+ int bf_data_num;
+};
+
+int xcache_queue_ctx_init(ocf_queue_t queue);
+void xcache_queue_ctx_deinit(ocf_queue_t queue);
+
+static inline struct xcache_queue_ctx *xcache_get_queue_ctx(ocf_queue_t queue)
+{
+ return (struct xcache_queue_ctx *)(queue->priv1);
+}
+
+struct xcache_backdev_io *xcache_queue_alloc_backdev_io(ocf_queue_t q);
+void xcache_queue_free_backdev_io(ocf_queue_t q, struct xcache_backdev_io *io);
+void xcache_queue_push_xcache_io_back(struct xcache_io *io, bool allow_sync);
+void xcache_queue_push_backdev_io_front(struct xcache_backdev_io *base_io, bool allow_sync);
+struct xcache_io *xcache_queue_alloc_xcache_io(ocf_queue_t q);
+void xcache_queue_free_xcache_io(ocf_queue_t q, struct xcache_io *io);
+
+ctx_data_t *xcache_queue_alloc_line_data(ocf_queue_t q, ocf_cache_t cache);
+void xcache_queue_free_line_data(ocf_queue_t q, ocf_cache_t cache, ctx_data_t *data);
+
+#endif /* XCACHE_QUEUE_H_ */
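The per-queue pools declared above (backdev ios, xcache ios, line-sized buffers) are touched only from the queue's own context, so they need no locking, and objects are recycled rather than freed while a pool has room. A usage sketch for the buffer pool (line_io_example is hypothetical):

    static void line_io_example(ocf_queue_t q, ocf_cache_t cache)
    {
        ctx_data_t *data = xcache_queue_alloc_line_data(q, cache);

        if (data == NULL)
            return;

        /* ... back one cache line of I/O with the buffer ... */

        /* returned to the per-queue pool when there is room, freed otherwise */
        xcache_queue_free_line_data(q, cache, data);
    }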
--
2.30.0