// mirror of https://github.com/huggingface/candle.git (synced 2025-06-20 12:06:35 +00:00)
#[cfg(feature = "mkl")]
|
|
extern crate intel_mkl_src;
|
|
|
|
#[cfg(feature = "accelerate")]
|
|
extern crate accelerate_src;
|
|
|
|
use candle::{Device, Result, Tensor};
|
|
|
|
/// Exercises the basic `Cache` contract:
/// - a fresh cache reports `current_seq_len() == 0` and `current_data()` is `None`;
/// - appended tensors are concatenated in order along the cache dimension;
/// - `current_seq_len` tracks the total number of appended elements;
/// - `reset` returns the cache to its initial empty state (verified by
///   running the whole scenario a second time after the reset).
#[test]
fn kv_cache() -> Result<()> {
    // Cache concatenating along dim 0, with capacity for up to 16 elements.
    let mut cache = candle_nn::kv_cache::Cache::new(0, 16);
    for _ in [0, 1] {
        // Fresh (or just-reset) cache: empty, no data tensor yet.
        assert_eq!(cache.current_seq_len(), 0);
        let data = cache.current_data()?;
        assert!(data.is_none());
        // First append: cache contents equal the appended tensor.
        let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
        cache.append(&t)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3.]);
        // Subsequent appends extend the stored sequence in order.
        let t = Tensor::new(&[4f32], &Device::Cpu)?;
        cache.append(&t)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4.]);
        let t = Tensor::new(&[0f32, 5., 6., 7.], &Device::Cpu)?;
        cache.append(&t)?;
        let data = cache.current_data()?.unwrap();
        assert_eq!(data.to_vec1::<f32>()?, [1., 2., 3., 4., 0., 5., 6., 7.]);
        assert_eq!(cache.current_seq_len(), 8);
        // Clear the cache; the next loop iteration re-checks the empty state.
        cache.reset();
    }
    Ok(())
}