Support use_cache_only and disable chunk map
Signed-off-by: zyfjeff <[email protected]>
zyfjeff committed Sep 25, 2023
1 parent 186bf8f commit 96cbb94
Showing 7 changed files with 180 additions and 6 deletions.
12 changes: 10 additions & 2 deletions api/src/config.rs
@@ -339,6 +339,7 @@ impl BackendConfigV2 {
}
None => return false,
},
"noop" => return true,
_ => return false,
}

@@ -755,9 +756,9 @@ pub struct FileCacheConfig {
  /// Key for data encryption, a heximal representation of [u8; 32].
  #[serde(default)]
  pub encryption_key: String,
- // disbale chunk map, it is assumed that all data is ready
+ /// Disable the chunk map; it is assumed that all data is ready.
  #[serde(default)]
- pub disbale_chunk_map: bool,
+ pub disable_chunk_map: bool,
  }

  impl FileCacheConfig {
@@ -845,6 +846,9 @@ pub struct RafsConfigV2 {
/// Filesystem prefetching configuration.
#[serde(default)]
pub prefetch: PrefetchConfigV2,
/// Only use the cache; don't access the backend.
#[serde(default)]
pub use_cache_only: bool,
}

impl RafsConfigV2 {
@@ -1369,6 +1373,9 @@ struct RafsConfig {
// ZERO value means, amplifying user io is not enabled.
#[serde(default = "default_batch_size")]
pub amplify_io: usize,
// Only use the cache; don't access the backend.
#[serde(default)]
pub use_cache_only: bool,
}

impl TryFrom<RafsConfig> for ConfigV2 {
@@ -1386,6 +1393,7 @@ impl TryFrom<RafsConfig> for ConfigV2 {
access_pattern: v.access_pattern,
latest_read_files: v.latest_read_files,
prefetch: v.fs_prefetch.into(),
use_cache_only: v.use_cache_only,
};
if !cache.prefetch.enable && rafs.prefetch.enable {
cache.prefetch = rafs.prefetch.clone();
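
Both new knobs are plain serde fields, so they can be smoke-tested in isolation. A minimal sketch, not part of the commit — the import path below is an assumption, while the field name and its #[serde(default)] come from the hunks above:

```rust
use nydus_api::FileCacheConfig; // hypothetical import path

fn demo() -> serde_json::Result<()> {
    // Omitted fields fall back to their serde defaults, so the flag is
    // false unless the configuration enables it explicitly.
    let cfg: FileCacheConfig =
        serde_json::from_str(r#"{"work_dir": "/tmp/cache", "disable_chunk_map": true}"#)?;
    assert!(cfg.disable_chunk_map);
    Ok(())
}
```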
2 changes: 2 additions & 0 deletions builder/src/core/blob.rs
@@ -229,9 +229,11 @@ impl Blob {
meta.set_len(
aligned_uncompressed_size + size_of::<BlobCompressionContextHeader>() as u64,
)?;
meta.seek(std::io::SeekFrom::Start(0))?;
meta.write_all(ci_data)?;
meta.seek(std::io::SeekFrom::Start(aligned_uncompressed_size))?;
meta.write_all(header.as_bytes())?;
meta.flush()?;
}
let encrypted_header =
crypt::encrypt_with_context(header.as_bytes(), cipher_obj, cipher_ctx, encrypt)?;
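
The two added lines pin down the write sequence. A sketch of the uncompressed blob.meta layout they produce, reconstructed from the hunk rather than taken from the commit:

```rust
// Layout of the uncompressed blob.meta file written above (sketch):
//
//   offset 0                            aligned_uncompressed_size
//   |-- ci_data ......... padding ..... |-- BlobCompressionContextHeader --|
//
// The seek to 0 pins the ci_data write to the start of the file regardless
// of where earlier I/O left the cursor, and the final flush() ensures both
// writes reach the file before the encrypted header is derived just below.
```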
2 changes: 2 additions & 0 deletions storage/src/backend/mod.rs
@@ -49,6 +49,8 @@ pub mod registry;
#[cfg(feature = "backend-s3")]
pub mod s3;

pub mod noop;

/// Error codes related to storage backend operations.
#[derive(Debug)]
pub enum BackendError {
91 changes: 91 additions & 0 deletions storage/src/backend/noop.rs
@@ -0,0 +1,91 @@
// Copyright (C) 2020-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

//! A no-op storage backend driver that rejects all blob access requests.
use std::io::Result;
use std::sync::Arc;

use fuse_backend_rs::file_buf::FileVolatileSlice;
use nydus_utils::metrics::BackendMetrics;

use crate::backend::{BackendError, BackendResult, BlobBackend, BlobReader};

#[derive(Debug)]
pub enum NoopError {
Noop,
}

/// A no-op backend that does nothing.
#[derive(Default)]
pub struct Noop {
metrics: Arc<BackendMetrics>,
}

impl Noop {
pub fn new(id: Option<&str>) -> Result<Self> {
let id = id.ok_or_else(|| einval!("noop requires blob_id"))?;
Ok(Noop {
metrics: BackendMetrics::new(id, "noop"),
})
}
}

struct NoopEntry {
blob_id: String,
metrics: Arc<BackendMetrics>,
}

impl BlobReader for NoopEntry {
fn blob_size(&self) -> BackendResult<u64> {
Err(BackendError::Unsupported(format!(
"unsupport blob_size operation for {}",
self.blob_id,
)))
}

fn try_read(&self, _buf: &mut [u8], _offset: u64) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport try_read operation for {}",
self.blob_id,
)))
}

fn readv(
&self,
_bufs: &[FileVolatileSlice],
_offset: u64,
_max_size: usize,
) -> BackendResult<usize> {
Err(BackendError::Unsupported(format!(
"unsupport readv operation for {}",
self.blob_id,
)))
}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}
}

impl BlobBackend for Noop {
fn shutdown(&self) {}

fn metrics(&self) -> &BackendMetrics {
&self.metrics
}

fn get_reader(&self, blob_id: &str) -> BackendResult<Arc<dyn BlobReader>> {
Ok(Arc::new(NoopEntry {
blob_id: blob_id.to_owned(),
metrics: self.metrics.clone(),
}))
}
}

impl Drop for Noop {
fn drop(&mut self) {
self.metrics.release().unwrap_or_else(|e| error!("{:?}", e));
}
}
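
A short usage sketch for the new backend (crate path assumed; the trait methods come from this diff): readers can always be constructed, but every actual read fails with BackendError::Unsupported — exactly the contract that use_cache_only relies on, since all data must already sit in the local cache.

```rust
use nydus_storage::backend::noop::Noop; // path assumed
use nydus_storage::backend::BlobBackend;

fn demo() -> std::io::Result<()> {
    let backend = Noop::new(Some("blob-0"))?;
    let reader = backend
        .get_reader("blob-0")
        .expect("Noop::get_reader is infallible");
    let mut buf = [0u8; 4096];
    // Any read attempt is rejected instead of hitting a remote backend.
    assert!(reader.try_read(&mut buf, 0).is_err());
    Ok(())
}
```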
5 changes: 2 additions & 3 deletions storage/src/cache/filecache/mod.rs
@@ -72,7 +72,7 @@ impl FileCacheMgr {
  worker_mgr: Arc::new(worker_mgr),
  work_dir: work_dir.to_owned(),
  disable_indexed_map: blob_cfg.disable_indexed_map,
- disable_chunk_map: blob_cfg.disbale_chunk_map,
+ disable_chunk_map: blob_cfg.disable_chunk_map,
  validate: config.cache_validate,
  cache_raw_data: config.cache_compressed,
  cache_encrypted: blob_cfg.enable_encryption,
@@ -233,8 +233,7 @@ impl FileCacheEntry {
  } else {
      let blob_file_path = format!("{}/{}", mgr.work_dir, blob_meta_id);
      let (chunk_map, is_direct_chunkmap) = if mgr.disable_chunk_map {
-         let chunk_map =
-             Arc::new(BlobStateMap::from(NoopChunkMap::new(true))) as Arc<dyn ChunkMap>;
+         let chunk_map = Arc::new(NoopChunkMap::new(true)) as Arc<dyn ChunkMap>;
          (chunk_map, true)
      } else {
          Self::create_chunk_map(mgr, &blob_info, &blob_file_path)?
61 changes: 61 additions & 0 deletions storage/src/cache/state/noop_chunk_map.rs
@@ -7,6 +7,8 @@ use std::io::Result;
use crate::cache::state::{ChunkIndexGetter, ChunkMap};
use crate::device::BlobChunkInfo;

use super::RangeMap;

/// A dummy implementation of the [ChunkMap] trait.
///
/// The `NoopChunkMap` is a dummy implementation of [ChunkMap], which just reports every chunk as
@@ -26,6 +28,41 @@ impl ChunkMap for NoopChunkMap {
fn is_ready(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(self.cached)
}

fn set_ready_and_clear_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<()> {
Ok(())
}

fn check_ready_and_mark_pending(
&self,
_chunk: &dyn BlobChunkInfo,
) -> crate::StorageResult<bool> {
Ok(true)
}

fn is_persist(&self) -> bool {
true
}

fn as_range_map(&self) -> Option<&dyn RangeMap<I = u32>> {
Some(self)
}

fn is_pending(&self, _chunk: &dyn BlobChunkInfo) -> Result<bool> {
Ok(false)
}

fn is_ready_or_pending(&self, chunk: &dyn BlobChunkInfo) -> Result<bool> {
if matches!(self.is_pending(chunk), Ok(true)) {
Ok(true)
} else {
self.is_ready(chunk)
}
}

fn clear_pending(&self, _chunk: &dyn BlobChunkInfo) {
panic!("clear_pending() is not supported");
}
}

impl ChunkIndexGetter for NoopChunkMap {
@@ -35,3 +72,27 @@
chunk.id()
}
}

impl RangeMap for NoopChunkMap {
type I = u32;
#[inline]
fn is_range_all_ready(&self) -> bool {
true
}

fn is_range_ready(&self, _start_index: u32, _count: u32) -> Result<bool> {
Ok(true)
}

fn check_range_ready_and_mark_pending(
&self,
_start_index: u32,
_count: u32,
) -> Result<Option<Vec<u32>>> {
Ok(None)
}

fn set_range_ready_and_clear_pending(&self, _start_index: u32, _count: u32) -> Result<()> {
Ok(())
}
}
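
These implementations are why filecache/mod.rs above can drop the BlobStateMap wrapper: the no-op map keeps no pending state that would need synchronizing and answers every readiness query positively. A sketch of the resulting contract (import path assumed; the methods are the ones defined in this file):

```rust
use nydus_storage::cache::state::{NoopChunkMap, RangeMap}; // paths assumed

fn demo() {
    let map = NoopChunkMap::new(true);
    // Every chunk and every range is reported ready...
    assert!(map.is_range_all_ready());
    assert!(map.is_range_ready(0, 1024).unwrap());
    // ...and nothing is ever marked pending, so there is nothing to fetch.
    assert!(map
        .check_range_ready_and_mark_pending(0, 1024)
        .unwrap()
        .is_none());
}
```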
13 changes: 12 additions & 1 deletion storage/src/factory.rs
@@ -27,6 +27,7 @@ use crate::backend::http_proxy;
use crate::backend::localdisk;
#[cfg(feature = "backend-localfs")]
use crate::backend::localfs;
use crate::backend::noop::Noop;
#[cfg(feature = "backend-oss")]
use crate::backend::oss;
#[cfg(feature = "backend-registry")]
@@ -117,6 +118,7 @@ impl BlobFactory {
) -> IOResult<Arc<dyn BlobCache>> {
let backend_cfg = config.get_backend_config()?;
let cache_cfg = config.get_cache_config()?;
let rafs_cfg = config.get_rafs_config()?;
let key = BlobCacheMgrKey {
config: config.clone(),
};
@@ -125,7 +127,12 @@ impl BlobFactory {
  if let Some(mgr) = guard.get(&key) {
      return mgr.get_blob_cache(blob_info);
  }
- let backend = Self::new_backend(backend_cfg, &blob_info.blob_id())?;
+
+ let backend = if rafs_cfg.use_cache_only {
+     Self::new_noop_backend(&blob_info.blob_id())?
+ } else {
+     Self::new_backend(backend_cfg, &blob_info.blob_id())?
+ };
  let mgr = match cache_cfg.cache_type.as_str() {
      "blobcache" | "filecache" => {
          let mgr = FileCacheMgr::new(cache_cfg, backend, ASYNC_RUNTIME.clone(), &config.id)?;
@@ -190,6 +197,9 @@ impl BlobFactory {
}
}

pub fn new_noop_backend(blob_id: &str) -> IOResult<Arc<dyn BlobBackend + Send + Sync>> {
Ok(Arc::new(Noop::new(Some(blob_id))?))
}
/// Create a storage backend for the blob with id `blob_id`.
#[allow(unused_variables)]
pub fn new_backend(
@@ -227,6 +237,7 @@ impl BlobFactory {
config.get_http_proxy_config()?,
Some(blob_id),
)?)),
"noop" => Ok(Arc::new(Noop::new(Some(blob_id))?)),
_ => Err(einval!(format!(
"unsupported backend type '{}'",
config.backend_type
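
With these changes the no-op driver is reachable two ways: explicitly, via "type": "noop" in the backend config (matched both here and in api/src/config.rs), or implicitly through rafs.use_cache_only, which makes new_blob_cache() ignore the configured backend entirely. A small sketch of the direct entry point (import path assumed; the function is the one added above):

```rust
use nydus_storage::factory::BlobFactory; // path assumed

fn demo() -> std::io::Result<()> {
    // Builds the no-op backend without consulting the backend section
    // of the configuration at all.
    let backend = BlobFactory::new_noop_backend("blob-0")?;
    // Readers exist, but any read through them fails with Unsupported.
    assert!(backend.get_reader("blob-0").is_ok());
    Ok(())
}
```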
