mirror of
https://github.com/huggingface/candle.git
synced 2025-06-17 11:08:52 +00:00
Mirror GGML's unit tests (#569)
* Add ggml unit tests * simplify random matmul test for other test cases
This commit is contained in:
@ -1,8 +1,16 @@
|
|||||||
use candle_core::{quantized, Device, Result, Tensor};
|
use candle_core::{quantized, Device, Result, Tensor};
|
||||||
use quantized::{k_quants, GgmlType};
|
use quantized::{k_quants, GgmlType};
|
||||||
mod test_utils;
|
mod test_utils;
|
||||||
|
use rand::prelude::*;
|
||||||
use test_utils::to_vec2_round;
|
use test_utils::to_vec2_round;
|
||||||
|
|
||||||
|
/// Total number of elements used by the GGML-mirrored tests (32 blocks * 128 elements).
const GGML_TEST_SIZE: usize = 32 * 128;

/// Maximum round-trip error (as computed by `calculate_rmse`) allowed for most formats.
const GGML_MAX_QUANTIZATION_TOTAL_ERROR: f32 = 0.002;
/// Looser round-trip bound used for 2-bit quantization.
const GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS: f32 = 0.0075;
/// Looser round-trip bound used for 3-bit quantization.
const GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS: f32 = 0.0040;
/// Maximum allowed per-element error for quantized dot products.
const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn quantized_matmul() -> Result<()> {
|
fn quantized_matmul() -> Result<()> {
|
||||||
let cpu = &Device::Cpu;
|
let cpu = &Device::Cpu;
|
||||||
@ -121,6 +129,10 @@ fn quantize_q4_0() -> Result<()> {
|
|||||||
127.0, 127.0
|
127.0, 127.0
|
||||||
]
|
]
|
||||||
);
|
);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ4_0>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -166,6 +178,41 @@ fn compare_with_error(values: &[f32], expected: &[f32], tolerance: f32) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Creates a vector simillarly to the one used in GGML unit tests: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30
|
||||||
|
fn create_ggml_like_vector(offset: f32) -> Vec<f32> {
|
||||||
|
let mut vec = vec![0.0; GGML_TEST_SIZE];
|
||||||
|
for (i, item) in vec.iter_mut().enumerate() {
|
||||||
|
*item = 0.1 + 2.0 * (i as f32 + offset).cos();
|
||||||
|
}
|
||||||
|
vec
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calculates the root mean square error between two vectors.
///
/// NOTE(review): this computes `sqrt(sum) / n`, matching GGML's `array_rmse`
/// (which the `GGML_MAX_QUANTIZATION_TOTAL_ERROR*` thresholds are calibrated
/// against) — it is intentionally not the textbook `sqrt(sum / n)`.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
fn calculate_rmse(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    let sum: f32 = a.iter().zip(b).map(|(x, y)| (x - y).powi(2)).sum();
    sum.sqrt() / a.len() as f32
}
|
||||||
|
|
||||||
|
/// Mirrores the GGML quanitzation unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L43-L50
|
||||||
|
fn ggml_quantization_error_test<T: GgmlType>(max_error: f32) -> Result<()> {
|
||||||
|
let src = create_ggml_like_vector(0.0);
|
||||||
|
let mut dst = vec![0.0; GGML_TEST_SIZE];
|
||||||
|
let _quant = quantize_roundtrip::<T>(src.as_slice(), dst.as_mut_slice())?;
|
||||||
|
let error = calculate_rmse(src.as_slice(), dst.as_slice());
|
||||||
|
if error > max_error {
|
||||||
|
candle_core::bail!(
|
||||||
|
"Quantization error {} exceeds max error {}",
|
||||||
|
error,
|
||||||
|
max_error
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn quantize_roundtrip<T: GgmlType>(src: &[f32], dst: &mut [f32]) -> Result<Vec<T>> {
|
fn quantize_roundtrip<T: GgmlType>(src: &[f32], dst: &mut [f32]) -> Result<Vec<T>> {
|
||||||
let mut quant = vec![T::zeros(); src.len() / T::BLCK_SIZE];
|
let mut quant = vec![T::zeros(); src.len() / T::BLCK_SIZE];
|
||||||
T::from_float(src, &mut quant)?;
|
T::from_float(src, &mut quant)?;
|
||||||
@ -195,6 +242,9 @@ fn quantize_q2k() -> Result<()> {
|
|||||||
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
||||||
let _quant_big = quantize_roundtrip::<BlockQ2K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ2K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 6.0);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 6.0);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ2K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -220,6 +270,9 @@ fn quantize_q3k() -> Result<()> {
|
|||||||
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
||||||
let _quant_big = quantize_roundtrip::<BlockQ3K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ3K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 3.5);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 3.5);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ3K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -245,6 +298,9 @@ fn quantize_q4k() -> Result<()> {
|
|||||||
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
||||||
let _quant_big = quantize_roundtrip::<BlockQ4K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ4K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 4.5);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 4.5);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ4K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -270,6 +326,10 @@ fn quantize_q5k() -> Result<()> {
|
|||||||
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
let (src_big, mut dst_big) = get_test_vector(128.0, Some(1024));
|
||||||
let _quant_big = quantize_roundtrip::<BlockQ5K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ5K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.5);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.5);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ5K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -296,6 +356,9 @@ fn quantize_q6k() -> Result<()> {
|
|||||||
let _quant_big = quantize_roundtrip::<BlockQ6K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ6K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.0);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.0);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ6K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -322,18 +385,56 @@ fn quantize_q8k() -> Result<()> {
|
|||||||
let _quant_big = quantize_roundtrip::<BlockQ8K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
let _quant_big = quantize_roundtrip::<BlockQ8K>(src_big.as_slice(), dst_big.as_mut_slice())?;
|
||||||
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 0.6);
|
compare_with_error(dst_big.as_slice(), src_big.as_slice(), 0.6);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_quantization_error_test::<BlockQ8K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
/// Very simple dot product reference implementation.
///
/// (The misspelled name is kept as-is: it is called by
/// `ggml_matmul_error_test` and renaming would break that caller.)
fn vec_dot_referenze(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b).map(|(a_i, b_i)| a_i * b_i).sum()
}
|
||||||
|
|
||||||
|
/// Mirrores the GGML matmul unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91
|
||||||
|
fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> {
|
||||||
|
let a = create_ggml_like_vector(0.0);
|
||||||
|
let b = create_ggml_like_vector(1.0);
|
||||||
|
let length = a.len();
|
||||||
|
|
||||||
|
let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE];
|
||||||
|
let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE];
|
||||||
|
T::from_float(&a, &mut a_quant)?;
|
||||||
|
T::VecDotType::from_float(&b, &mut b_quant)?;
|
||||||
|
|
||||||
|
let result = T::vec_dot(length, &a_quant, &b_quant)?;
|
||||||
|
let reference_result = vec_dot_referenze(&a, &b);
|
||||||
|
|
||||||
|
let error = (result - reference_result).abs() / length as f32;
|
||||||
|
|
||||||
|
if error > GGML_MAX_DOT_PRODUCT_ERROR {
|
||||||
|
candle_core::bail!(
|
||||||
|
"Dot product error {} exceeds max error {}",
|
||||||
|
error,
|
||||||
|
GGML_MAX_DOT_PRODUCT_ERROR
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// generates random tensors of size `m x k` and `n x k` and calculates their expected matrix multiplication result.
|
||||||
|
fn get_random_tensors(
|
||||||
|
m: usize,
|
||||||
|
k: usize,
|
||||||
|
n: usize,
|
||||||
|
device: &Device,
|
||||||
|
) -> Result<(Tensor, Tensor, Tensor)> {
|
||||||
let mut rng = StdRng::seed_from_u64(314159265358979);
|
let mut rng = StdRng::seed_from_u64(314159265358979);
|
||||||
|
|
||||||
let cpu = &Device::Cpu;
|
|
||||||
let (m, k, n) = (11, 512, 21);
|
|
||||||
let lhs = (0..m * k)
|
let lhs = (0..m * k)
|
||||||
.map(|_| rng.gen::<f32>() - 0.5)
|
.map(|_| rng.gen::<f32>() - 0.5)
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
@ -341,16 +442,23 @@ fn quantized_matmul_q6k() -> Result<()> {
|
|||||||
.map(|_| rng.gen::<f32>() - 0.5)
|
.map(|_| rng.gen::<f32>() - 0.5)
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
let lhs = Tensor::from_vec(lhs, (m, k), cpu)?;
|
let lhs = Tensor::from_vec(lhs, (m, k), device)?;
|
||||||
let rhs = Tensor::from_vec(rhs, (n, k), cpu)?;
|
let rhs = Tensor::from_vec(rhs, (n, k), device)?;
|
||||||
|
|
||||||
let mm = lhs.matmul(&rhs.t()?)?;
|
let mm = lhs.matmul(&rhs.t()?)?;
|
||||||
|
Ok((lhs, rhs, mm))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn quantized_matmul_q6k() -> Result<()> {
|
||||||
|
use k_quants::BlockQ6K;
|
||||||
|
|
||||||
|
let cpu = &Device::Cpu;
|
||||||
|
let (m, k, n) = (11, 512, 21);
|
||||||
|
let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
|
||||||
assert_eq!(mm.dims(), [m, n]);
|
assert_eq!(mm.dims(), [m, n]);
|
||||||
let dst = mm.flatten_all()?.to_vec1::<f32>()?;
|
let dst = mm.flatten_all()?.to_vec1::<f32>()?;
|
||||||
let dst = [dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]
|
let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
|
||||||
.iter()
|
|
||||||
.map(|x| (1000. * x).round() / 1000.)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);
|
assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);
|
||||||
|
|
||||||
let rhs = quantized::QTensor::quantize::<BlockQ6K>(&rhs)?;
|
let rhs = quantized::QTensor::quantize::<BlockQ6K>(&rhs)?;
|
||||||
@ -359,11 +467,11 @@ fn quantized_matmul_q6k() -> Result<()> {
|
|||||||
|
|
||||||
assert_eq!(mm.dims(), [m, n]);
|
assert_eq!(mm.dims(), [m, n]);
|
||||||
let dst = mm.flatten_all()?.to_vec1::<f32>()?;
|
let dst = mm.flatten_all()?.to_vec1::<f32>()?;
|
||||||
let dst = [dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]
|
let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
|
||||||
.iter()
|
|
||||||
.map(|x| (1000. * x).round() / 1000.)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
assert_eq!(dst, [1.324, 1.49, -0.164, 1.741]);
|
assert_eq!(dst, [1.324, 1.49, -0.164, 1.741]);
|
||||||
|
|
||||||
|
//mirrored GGML unit test
|
||||||
|
ggml_matmul_error_test::<BlockQ6K>()?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user