Split out the quantized file. (#456)
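The commit adds a new candle-core/src/quantized module made of three files: ggml_file.rs, which parses llama.cpp-style GGML/GGJT files into tensors; k_quants.rs, which defines the quantized block formats together with their (de)quantization and dot-product kernels; and mod.rs, which exposes the GgmlDType enum plus the per-type block and type sizes used to size raw reads.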
candle-core/src/quantized/ggml_file.rs (new file, 294 lines)
@@ -0,0 +1,294 @@
//! Support for the GGML file format.

use super::{k_quants, GgmlDType};
use crate::{DType, Device, Result, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};

// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.h#L37
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Magic {
    Ggjt,
    Ggla,
    Ggmf,
    Ggml,
    Ggsn,
}

impl TryFrom<u32> for Magic {
    type Error = crate::Error;
    fn try_from(value: u32) -> Result<Self> {
        let magic = match value {
            0x67676a74 => Self::Ggjt,
            0x67676c61 => Self::Ggla,
            0x67676d66 => Self::Ggmf,
            0x67676d6c => Self::Ggml,
            0x6767736e => Self::Ggsn,
            _ => crate::bail!("unknown magic {value:08x}"),
        };
        Ok(magic)
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VersionedMagic {
    GgmlUnversioned,
    GgmfV1,
    GgjtV1,
    GgjtV2,
    GgjtV3,
}

impl VersionedMagic {
    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
        let magic = reader.read_u32::<LittleEndian>()?;
        let magic = Magic::try_from(magic)?;
        if magic == Magic::Ggml {
            return Ok(Self::GgmlUnversioned);
        }
        let version = reader.read_u32::<LittleEndian>()?;
        let versioned_magic = match (magic, version) {
            (Magic::Ggmf, 1) => Self::GgmfV1,
            (Magic::Ggjt, 1) => Self::GgjtV1,
            (Magic::Ggjt, 2) => Self::GgjtV2,
            (Magic::Ggjt, 3) => Self::GgjtV3,
            _ => crate::bail!("ggml: unsupported magic/version {magic:?}/{version}"),
        };
        Ok(versioned_magic)
    }

    fn align32(&self) -> bool {
        match self {
            Self::GgmlUnversioned | Self::GgmfV1 => false,
            Self::GgjtV1 | Self::GgjtV2 | Self::GgjtV3 => true,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HParams {
    pub n_vocab: u32,
    pub n_embd: u32,
    pub n_mult: u32,
    pub n_head: u32,
    pub n_layer: u32,
    pub n_rot: u32,
    pub ftype: u32,
}

impl HParams {
    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
        let n_vocab = reader.read_u32::<LittleEndian>()?;
        let n_embd = reader.read_u32::<LittleEndian>()?;
        let n_mult = reader.read_u32::<LittleEndian>()?;
        let n_head = reader.read_u32::<LittleEndian>()?;
        let n_layer = reader.read_u32::<LittleEndian>()?;
        let n_rot = reader.read_u32::<LittleEndian>()?;
        let ftype = reader.read_u32::<LittleEndian>()?;
        Ok(Self {
            n_vocab,
            n_embd,
            n_mult,
            n_head,
            n_layer,
            n_rot,
            ftype,
        })
    }
}

#[derive(Debug, Clone, PartialEq)]
pub struct Vocab {
    pub token_score_pairs: Vec<(Vec<u8>, f32)>,
}

impl Vocab {
    fn read<R: std::io::Read>(reader: &mut R, n_vocab: usize) -> Result<Self> {
        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L556
        let mut token_score_pairs = Vec::with_capacity(n_vocab);
        for _index in 0..n_vocab {
            let len = reader.read_u32::<LittleEndian>()? as usize;
            let mut word = vec![0u8; len];
            reader.read_exact(&mut word)?;
            let score = reader.read_f32::<LittleEndian>()?;
            token_score_pairs.push((word, score))
        }
        Ok(Self { token_score_pairs })
    }
}

fn dequantize_and_create_tensor<T: super::GgmlType>(
    raw_data: &[u8],
    tensor_elems: usize,
    size_in_bytes: usize,
    dims: Vec<usize>,
    device: &Device,
) -> Result<Tensor> {
    let mut f32_data = vec![0f32; tensor_elems];
    let raw_data_ptr = raw_data.as_ptr();
    let n_blocks = size_in_bytes / std::mem::size_of::<T>();
    let raw_data = unsafe { std::slice::from_raw_parts(raw_data_ptr as *const T, n_blocks) };
    T::to_float(raw_data, &mut f32_data)?;
    Tensor::from_vec(f32_data, dims, device)
}

/// Creates a [Tensor] from a raw GGML tensor.
pub fn tensor_from_ggml(
    ggml_dtype: GgmlDType,
    raw_data: &[u8],
    dims: Vec<usize>,
    dtype: DType,
    device: &Device,
) -> Result<Tensor> {
    let tensor_elems = dims.iter().product::<usize>();
    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();

    let tensor = match ggml_dtype {
        GgmlDType::F32 => Tensor::from_raw_buffer(raw_data, DType::F32, &dims, device),
        GgmlDType::F16 => Tensor::from_raw_buffer(raw_data, DType::F16, &dims, device),
        GgmlDType::Q4_0 => dequantize_and_create_tensor::<k_quants::BlockQ4_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q4_1 => dequantize_and_create_tensor::<k_quants::BlockQ4_1>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5_0 => dequantize_and_create_tensor::<k_quants::BlockQ5_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5_1 => dequantize_and_create_tensor::<k_quants::BlockQ5_1>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q8_0 => dequantize_and_create_tensor::<k_quants::BlockQ8_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q2K => dequantize_and_create_tensor::<k_quants::BlockQ2K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q3K => dequantize_and_create_tensor::<k_quants::BlockQ3K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q4K => dequantize_and_create_tensor::<k_quants::BlockQ4K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5K => dequantize_and_create_tensor::<k_quants::BlockQ5K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q6K => dequantize_and_create_tensor::<k_quants::BlockQ6K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
_ => crate::bail!("quantized type {dtype:?} is not supported yet"),
    }?;
    // We only have ggml-quant to f32 conversions, meaning we have to convert to the desired type.
    if tensor.dtype() != dtype {
        tensor.to_dtype(dtype)
    } else {
        Ok(tensor)
    }
}

fn read_one_tensor<R: std::io::Seek + std::io::Read>(
    reader: &mut R,
    magic: VersionedMagic,
    dtype: DType,
    device: &Device,
) -> Result<(String, Tensor)> {
    let n_dims = reader.read_u32::<LittleEndian>()?;
    let name_len = reader.read_u32::<LittleEndian>()?;
    let ggml_dtype = reader.read_u32::<LittleEndian>()?;
    let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?;
    let mut dims = vec![0u32; n_dims as usize];
    reader.read_u32_into::<LittleEndian>(&mut dims)?;
    let mut name = vec![0u8; name_len as usize];
    reader.read_exact(&mut name)?;
    let name = String::from_utf8_lossy(&name).into_owned();

    if magic.align32() {
        let pos = reader.stream_position()?;
        reader.seek(std::io::SeekFrom::Current(((32 - pos % 32) % 32) as i64))?;
    }
    let dims = dims.iter().map(|&u| u as usize).collect::<Vec<_>>();
    let tensor_elems = dims.iter().product::<usize>();
    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();
    println!("{name} {ggml_dtype:?} {dims:?}");
    // TODO: Mmap version to avoid copying the data around?
    let mut raw_data = vec![0u8; size_in_bytes];
    reader.read_exact(&mut raw_data)?;
    match tensor_from_ggml(ggml_dtype, &raw_data, dims, dtype, device) {
        Ok(tensor) => Ok((name, tensor)),
        Err(e) => crate::bail!("Error creating tensor {name}: {e}"),
    }
}

#[derive(Debug)]
pub struct Content {
    pub magic: VersionedMagic,
    pub hparams: HParams,
    pub vocab: Vocab,
    pub tensors: Vec<(String, Tensor)>,
}

impl Content {
    pub fn read<R: std::io::Seek + std::io::Read>(
        reader: &mut R,
        dtype: DType,
        device: &Device,
    ) -> Result<Content> {
        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L505
        let last_position = reader.seek(std::io::SeekFrom::End(0))?;
        reader.seek(std::io::SeekFrom::Start(0))?;
        let magic = VersionedMagic::read(reader)?;
        let hparams = HParams::read(reader)?;
        let vocab = Vocab::read(reader, hparams.n_vocab as usize)?;
        let mut tensors = vec![];

        while reader.stream_position()? != last_position {
            let (name, tensor) = read_one_tensor(reader, magic, dtype, device)?;
            tensors.push((name, tensor))
        }
        Ok(Self {
            magic,
            hparams,
            vocab,
            tensors,
        })
    }
}
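Content::read drives the whole parse: it seeks to the end to find the file size, rewinds, reads the (possibly versioned) magic, the hyperparameters and the vocab, then loops over tensors until the end of the file, dequantizing each one to the requested dtype. A minimal usage sketch, not part of this commit: it assumes the candle-core crate is imported as candle_core, that the quantized module is publicly exported, and that anyhow is available for error handling; the model path is a placeholder.

use candle_core::quantized::ggml_file::Content;
use candle_core::{DType, Device};

fn main() -> anyhow::Result<()> {
    // Placeholder path; any llama.cpp-style GGML/GGJT file should work here.
    let mut file = std::fs::File::open("model-q4_0.bin")?;
    // Dequantize every tensor to f32 on the CPU.
    let content = Content::read(&mut file, DType::F32, &Device::Cpu)?;
    println!(
        "{} tensors, n_layer={} n_embd={}",
        content.tensors.len(),
        content.hparams.n_layer,
        content.hparams.n_embd
    );
    Ok(())
}
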
candle-core/src/quantized/k_quants.rs (new file, 728 lines)
@@ -0,0 +1,728 @@
use super::GgmlDType;
use crate::Result;
use half::f16;

// Default to QK_K 256 rather than 64.
pub const QK_K: usize = 256;
pub const K_SCALE_SIZE: usize = 12;

pub const QK4_0: usize = 32;
pub const QK4_1: usize = 32;
pub const QK5_0: usize = 32;
pub const QK5_1: usize = 32;
pub const QK8_0: usize = 32;
pub const QK8_1: usize = 32;

pub trait GgmlType: Sized + Clone {
    const DTYPE: GgmlDType;
    const BLCK_SIZE: usize;
    type VecDotType: GgmlType;

    // This is only safe for types that include immediate values such as float/int/...
    fn zeros() -> Self {
        unsafe { std::mem::MaybeUninit::zeroed().assume_init() }
    }
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()>;
    fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()>;

    /// Dot product used as a building block for quantized mat-mul.
    /// n is the number of elements to be considered.
    fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>;
}

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ4_0 {
    d: f16,
    qs: [u8; QK4_0 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ4_0>() == 18);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ4_1 {
    d: f16,
    m: f16,
    qs: [u8; QK4_1 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ4_1>() == 20);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5_0 {
    d: f16,
    qh: [u8; 4],
    qs: [u8; QK5_0 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ5_0>() == 22);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5_1 {
    d: f16,
    m: f16,
    qh: [u8; 4],
    qs: [u8; QK5_1 / 2],
}
const _: () = assert!(std::mem::size_of::<BlockQ5_1>() == 24);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8_0 {
    d: f16,
    // Signed 8-bit quants, matching llama.cpp's block_q8_0.
    qs: [i8; QK8_0],
}
const _: () = assert!(std::mem::size_of::<BlockQ8_0>() == 34);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8_1 {
    d: f16,
    s: f16,
    qs: [i8; QK8_1],
}
const _: () = assert!(std::mem::size_of::<BlockQ8_1>() == 36);

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ2K {
    scales: [u8; QK_K / 16],
    qs: [u8; QK_K / 4],
    d: f16,
    dmin: f16,
}
const _: () = assert!(QK_K / 16 + QK_K / 4 + 2 * 2 == std::mem::size_of::<BlockQ2K>());

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ3K {
    hmask: [u8; QK_K / 8],
    qs: [u8; QK_K / 4],
    scales: [u8; 12],
    d: f16,
}
const _: () = assert!(QK_K / 8 + QK_K / 4 + 12 + 2 == std::mem::size_of::<BlockQ3K>());

#[derive(Debug, Clone, PartialEq)]
// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/k_quants.h#L82
#[repr(C)]
pub struct BlockQ4K {
    d: f16,
    dmin: f16,
    scales: [u8; K_SCALE_SIZE],
    qs: [u8; QK_K / 2],
}
const _: () = assert!(QK_K / 2 + K_SCALE_SIZE + 2 * 2 == std::mem::size_of::<BlockQ4K>());

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ5K {
    d: f16,
    dmin: f16,
    scales: [u8; K_SCALE_SIZE],
    qh: [u8; QK_K / 8],
    qs: [u8; QK_K / 2],
}
const _: () =
    assert!(QK_K / 8 + QK_K / 2 + 2 * 2 + K_SCALE_SIZE == std::mem::size_of::<BlockQ5K>());

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ6K {
    ql: [u8; QK_K / 2],
    qh: [u8; QK_K / 4],
    scales: [i8; QK_K / 16],
    d: f16,
}
const _: () = assert!(3 * QK_K / 4 + QK_K / 16 + 2 == std::mem::size_of::<BlockQ6K>());

#[derive(Debug, Clone, PartialEq)]
#[repr(C)]
pub struct BlockQ8K {
    d: f32,
    qs: [i8; QK_K],
    bsums: [i16; QK_K / 16],
}
const _: () = assert!(4 + QK_K + QK_K / 16 * 2 == std::mem::size_of::<BlockQ8K>());

impl GgmlType for BlockQ4_1 {
    const DTYPE: GgmlDType = GgmlDType::Q4_1;
    const BLCK_SIZE: usize = QK4_1;
    type VecDotType = BlockQ8_1;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1545
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK4_1 != 0 {
            crate::bail!("dequantize_row_q4_1: {k} is not divisible by {QK4_1}");
        }

        let nb = k / QK4_1;
        for i in 0..nb {
            let d = xs[i].d.to_f32();
            let m = xs[i].m.to_f32();

            for j in 0..(QK4_1 / 2) {
                let x0 = xs[i].qs[j] & 0x0F;
                let x1 = xs[i].qs[j] >> 4;

                ys[i * QK4_1 + j] = (x0 as f32) * d + m;
                ys[i * QK4_1 + j + QK4_1 / 2] = (x1 as f32) * d + m;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ5_0 {
    const DTYPE: GgmlDType = GgmlDType::Q5_0;
    const BLCK_SIZE: usize = QK5_0;
    type VecDotType = BlockQ8_0;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1566
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK5_0 != 0 {
            crate::bail!("dequantize_row_q5_0: {k} is not divisible by {QK5_0}");
        }

        let nb = k / QK5_0;
        for i in 0..nb {
            let d = xs[i].d.to_f32();
            let qh: u32 = unsafe { std::mem::transmute_copy(&xs[i].qh) };

            for j in 0..(QK5_0 / 2) {
                let xh_0 = (((qh >> j) << 4) & 0x10) as u8;
                let xh_1 = ((qh >> (j + 12)) & 0x10) as u8;

                let x0 = ((xs[i].qs[j] & 0x0F) | xh_0) as i32 - 16;
                let x1 = ((xs[i].qs[j] >> 4) | xh_1) as i32 - 16;

                ys[i * QK5_0 + j] = (x0 as f32) * d;
                ys[i * QK5_0 + j + QK5_0 / 2] = (x1 as f32) * d;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ5_1 {
    const DTYPE: GgmlDType = GgmlDType::Q5_1;
    const BLCK_SIZE: usize = QK5_1;
    type VecDotType = BlockQ8_1;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1592
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK5_1 != 0 {
            crate::bail!("dequantize_row_q5_1: {k} is not divisible by {QK5_1}");
        }

        let nb = k / QK5_1;
        for i in 0..nb {
            let d = xs[i].d.to_f32();
            let m = xs[i].m.to_f32();
            let qh: u32 = unsafe { std::mem::transmute_copy(&xs[i].qh) };

            for j in 0..(QK5_1 / 2) {
                let xh_0 = (((qh >> j) << 4) & 0x10) as u8;
                let xh_1 = ((qh >> (j + 12)) & 0x10) as u8;

                let x0 = (xs[i].qs[j] & 0x0F) | xh_0;
                let x1 = (xs[i].qs[j] >> 4) | xh_1;

                ys[i * QK5_1 + j] = (x0 as f32) * d + m;
                ys[i * QK5_1 + j + QK5_1 / 2] = (x1 as f32) * d + m;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ2K {
    const DTYPE: GgmlDType = GgmlDType::Q2K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }
    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L354
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK_K != 0 {
            crate::bail!("dequantize_row_q2k: {k} is not divisible by {QK_K}")
        }
        let mut ys_index = 0;
        for x in xs {
            let d = x.d.to_f32();
            let min = x.dmin.to_f32();
            let q = &x.qs;

            let mut is = 0;
            for n in (0..QK_K).step_by(128) {
                // Step by 32 over q.
                let q = &q[n / 4..];
                let mut shift = 0;
                for _j in 0..4 {
                    let sc = x.scales[is];
                    is += 1;
                    let dl = d * (sc & 0xF) as f32;
                    let ml = min * (sc >> 4) as f32;
                    for q in &q[..16] {
                        let y = dl * ((q >> shift) & 3) as i8 as f32 - ml;
                        ys[ys_index] = y;
                        ys_index += 1;
                    }

                    let sc = x.scales[is];
                    is += 1;
                    let dl = d * (sc & 0xF) as f32;
                    let ml = min * (sc >> 4) as f32;
                    for q in &q[16..32] {
                        let y = dl * ((q >> shift) & 3) as i8 as f32 - ml;
                        ys[ys_index] = y;
                        ys_index += 1;
                    }

                    shift += 2;
                }
            }
        }
        Ok(())
    }
}

fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) {
    if j < 4 {
        let d = q[j] & 63;
        let m = q[j + 4] & 63;
        (d, m)
    } else {
        let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4);
        (d, m)
    }
}

impl GgmlType for BlockQ4K {
    const DTYPE: GgmlDType = GgmlDType::Q4K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }
    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L735
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK_K != 0 {
            crate::bail!("dequantize_row_q4k: {k} is not divisible by {QK_K}")
        }
        let mut ys_index = 0;
        for x in xs.iter() {
            let d = x.d.to_f32();
            let min = x.dmin.to_f32();
            let q = &x.qs;
            let mut is = 0;
            for j in (0..QK_K).step_by(64) {
                let q = &q[j / 2..j / 2 + 32];
                let (sc, m) = get_scale_min_k4(is, &x.scales);
                let d1 = d * sc as f32;
                let m1 = min * m as f32;
                let (sc, m) = get_scale_min_k4(is + 1, &x.scales);
                let d2 = d * sc as f32;
                let m2 = min * m as f32;
                for q in q {
                    let y = d1 * (q & 0xF) as f32 - m1;
                    ys[ys_index] = y;
                    ys_index += 1;
                }
                for q in q {
                    let y = d2 * (q >> 4) as f32 - m2;
                    ys[ys_index] = y;
                    ys_index += 1;
                }
                is += 2;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ3K {
    const DTYPE: GgmlDType = GgmlDType::Q3K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533
    fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> {
        todo!()
    }
}

// https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928
impl GgmlType for BlockQ5K {
    const DTYPE: GgmlDType = GgmlDType::Q5K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK_K != 0 {
            crate::bail!("dequantize_row_q5k: {k} is not divisible by {QK_K}")
        }
        let mut ys_index = 0;
        for x in xs.iter() {
            let d = x.d.to_f32();
            let min = x.dmin.to_f32();
            let ql = &x.qs;
            let qh = &x.qh;
            let mut is = 0;
            let mut u1 = 1;
            let mut u2 = 2;
            for j in (0..QK_K).step_by(64) {
                let ql = &ql[j / 2..j / 2 + 32];
                let (sc, m) = get_scale_min_k4(is, &x.scales);
                let d1 = d * sc as f32;
                let m1 = min * m as f32;
                let (sc, m) = get_scale_min_k4(is + 1, &x.scales);
                let d2 = d * sc as f32;
                let m2 = min * m as f32;
                for (ql, qh) in ql.iter().zip(qh) {
let to_add = if qh & u1 != 0 { 16 } else { 1 };
|
||||
let y = d1 * ((ql & 0xF) + to_add) as f32 - m1;
|
||||
ys[ys_index] = y;
|
||||
ys_index += 1;
|
||||
}
|
||||
for (ql, qh) in ql.iter().zip(qh) {
|
||||
let to_add = if qh & u2 != 0 { 16 } else { 1 };
                    let y = d2 * ((ql >> 4) + to_add) as f32 - m2;
                    ys[ys_index] = y;
                    ys_index += 1;
                }
                is += 2;
                u1 <<= 2;
                u2 <<= 2;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ6K {
    const DTYPE: GgmlDType = GgmlDType::Q6K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L1067
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK_K != 0 {
            crate::bail!("dequantize_row_q6k: {k} is not divisible by {QK_K}")
        }
        for (block_idx, x) in xs.iter().enumerate() {
            let d = x.d.to_f32();
            let ql = &x.ql;
            let qh = &x.qh;
            let sc = &x.scales;
            for n in (0..QK_K).step_by(128) {
                let idx = n / 128;
                // Each super-block writes to its own QK_K-sized slice of the output.
                let ys = &mut ys[block_idx * QK_K + n..];
                let sc = &sc[8 * idx..];
                let ql = &ql[64 * idx..];
                let qh = &qh[32 * idx..];
                for l in 0..32 {
                    let is = l / 16;
                    let q1 = ((ql[l] & 0xF) | ((qh[l] & 3) << 4)) as i8 - 32;
                    let q2 = ((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i8 - 32;
                    let q3 = ((ql[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i8 - 32;
                    let q4 = ((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i8 - 32;
                    ys[l] = d * sc[is] as f32 * q1 as f32;
                    ys[l + 32] = d * sc[is + 2] as f32 * q2 as f32;
                    ys[l + 64] = d * sc[is + 4] as f32 * q3 as f32;
                    ys[l + 96] = d * sc[is + 6] as f32 * q4 as f32;
                }
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ8K {
    const DTYPE: GgmlDType = GgmlDType::Q8K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533
    fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> {
        todo!()
    }
}

impl GgmlType for BlockQ4_0 {
    const DTYPE: GgmlDType = GgmlDType::Q4_0;
    const BLCK_SIZE: usize = QK4_0;
    type VecDotType = BlockQ8_0;

    // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1525
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK4_0 != 0 {
            crate::bail!("dequantize_row_q4_0: {k} is not divisible by {QK4_0}")
        }

        let nb = k / QK4_0;
        for i in 0..nb {
            let d = xs[i].d.to_f32();

            for j in 0..(QK4_0 / 2) {
                let x0 = (xs[i].qs[j] & 0x0F) as i16 - 8;
                let x1 = (xs[i].qs[j] >> 4) as i16 - 8;

                ys[i * QK4_0 + j] = (x0 as f32) * d;
                ys[i * QK4_0 + j + QK4_0 / 2] = (x1 as f32) * d;
            }
        }
        Ok(())
    }

    fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
        // quantize_row_q4_0
        let qk = Self::BLCK_SIZE;
        let k = xs.len();
        if k % qk != 0 {
            crate::bail!("{k} is not divisible by {}", qk);
        };
        let nb = k / qk;
        if ys.len() != nb {
            crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk)
        }
        for (i, ys) in ys.iter_mut().enumerate() {
            let mut amax = 0f32;
            let mut max = 0f32;

            let xs = &xs[i * qk..(i + 1) * qk];
            for &x in xs.iter() {
                if amax < x.abs() {
                    amax = x.abs();
                    max = x;
                }
            }
            let d = max / -8.0;
            let id = if d != 0f32 { 1. / d } else { 0. };
            ys.d = f16::from_f32(d);

            for (j, q) in ys.qs.iter_mut().enumerate() {
                let x0 = xs[j] * id;
                let x1 = xs[qk / 2 + j] * id;
                let xi0 = u8::min(15, (x0 + 8.5) as u8);
                let xi1 = u8::min(15, (x1 + 8.5) as u8);
                *q = xi0 | (xi1 << 4)
            }
        }
        Ok(())
    }

    // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L2361C10-L2361C122
    fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
        let qk = QK8_0;
        let nb = n / qk;
        if n % QK8_0 != 0 {
            crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}")
        }
        if nb % 2 != 0 {
            crate::bail!("vec_dot_q4_0_q8_0: {nb} is not even")
        }

        // Generic implementation.
        let mut sumf = 0f32;
        for i in 0..nb {
            let mut sum_i = 0;
            for j in 0..qk / 2 {
                let v0 = (xs[i].qs[j] & 0x0F) as i32 - 8;
                let v1 = (xs[i].qs[j] >> 4) as i32 - 8;
                sum_i += v0 * ys[i].qs[j] as i32 + v1 * ys[i].qs[j + qk / 2] as i32
            }
            sumf += sum_i as f32 * f16::to_f32(xs[i].d) * f16::to_f32(ys[i].d)
        }
        Ok(sumf)
    }
}

impl GgmlType for BlockQ8_0 {
    const DTYPE: GgmlDType = GgmlDType::Q8_0;
    const BLCK_SIZE: usize = QK8_0;
    type VecDotType = BlockQ8_0;

    // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1619
    fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> {
        let k = ys.len();
        if k % QK8_0 != 0 {
            crate::bail!("dequantize_row_q8_0: {k} is not divisible by {QK8_0}");
        }

        let nb = k / QK8_0;

        for i in 0..nb {
            let d = xs[i].d.to_f32();

            for j in 0..QK8_0 {
                ys[i * QK8_0 + j] = xs[i].qs[j] as f32 * d;
            }
        }
        Ok(())
    }

    fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
        // quantize_row_q8_0
        let k = xs.len();
        if k % Self::BLCK_SIZE != 0 {
            crate::bail!("{k} is not divisible by {}", Self::BLCK_SIZE);
        };
        let nb = k / Self::BLCK_SIZE;
        if ys.len() != nb {
            crate::bail!(
                "size mismatch {} {} {}",
                xs.len(),
                ys.len(),
                Self::BLCK_SIZE
            )
        }
        for (i, ys) in ys.iter_mut().enumerate() {
            let mut amax = 0f32;
            let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE];
            for &x in xs.iter() {
                amax = amax.max(x.abs())
            }
            let d = amax / ((1 << 7) - 1) as f32;
            let id = if d != 0f32 { 1. / d } else { 0. };
            ys.d = f16::from_f32(d);
            for (y, &x) in ys.qs.iter_mut().zip(xs.iter()) {
                // The quants are signed, so keep the rounded value as an i8.
                *y = f32::round(x * id) as i8
            }
        }
        Ok(())
    }

    fn vec_dot(_: usize, _: &[Self], _: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }
}

impl GgmlType for BlockQ8_1 {
    const DTYPE: GgmlDType = GgmlDType::Q8_1;
    const BLCK_SIZE: usize = QK8_1;
    type VecDotType = BlockQ8_1;

    fn vec_dot(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> {
        todo!()
    }

    fn from_float(_xs: &[f32], _ys: &mut [Self]) -> Result<()> {
        todo!()
    }

    // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533
    fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> {
        todo!()
    }
}

// https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L10605
pub fn matmul<T: GgmlType>(
    mkn: (usize, usize, usize),
    lhs: &[f32],
    rhs_t: &[T],
    dst: &mut [f32],
) -> Result<()> {
    let (m, k, n) = mkn;
    if m * k != lhs.len() {
        crate::bail!("unexpected lhs length {} {mkn:?}", lhs.len());
    }

    let k_in_lhs_blocks = (k + T::BLCK_SIZE - 1) / T::BLCK_SIZE;
    let k_in_rhs_blocks = (k + T::VecDotType::BLCK_SIZE - 1) / T::VecDotType::BLCK_SIZE;
    // TODO: Do not make this copy if the DotType is f32.
    // TODO: Pre-allocate this.
    let mut lhs_b = vec![T::VecDotType::zeros(); m * k_in_lhs_blocks];
    for row_idx in 0..m {
        let lhs_b = &mut lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks];
        let lhs = &lhs[row_idx * k..(row_idx + 1) * k];
        T::VecDotType::from_float(lhs, lhs_b)?
    }
    let lhs_b = lhs_b.as_slice();

    for row_idx in 0..m {
        let lhs_row = &lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks];
        let dst_row = &mut dst[row_idx * n..(row_idx + 1) * n];
        for (col_idx, dst) in dst_row.iter_mut().enumerate() {
            let rhs_col = &rhs_t[col_idx * k_in_rhs_blocks..(col_idx + 1) * k_in_rhs_blocks];
            *dst = T::vec_dot(k, rhs_col, lhs_row)?;
        }
    }
    Ok(())
}
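matmul is the quantized mat-mul building block: the f32 lhs rows are quantized on the fly into T::VecDotType blocks (Q8_0 for the Q4_0 path above), and each output element is a vec_dot between a quantized rhs column and a quantized lhs row. A small end-to-end sketch, not part of this commit; it assumes the same candle_core crate path as above and exercises the only fully wired path at this point (Q4_0 weights against Q8_0 activations), which also requires k to be a multiple of 64 so that the per-row block count is even:

use candle_core::quantized::k_quants::{self, BlockQ4_0, GgmlType};

fn main() -> candle_core::Result<()> {
    let (m, k, n) = (2, 64, 3);
    // Dummy f32 activations (m x k) and transposed weights (n x k).
    let lhs: Vec<f32> = (0..m * k).map(|i| i as f32 / 100.0).collect();
    let rhs_t: Vec<f32> = (0..n * k).map(|i| (i % 7) as f32 - 3.0).collect();

    // Quantize the weights into Q4_0 blocks, k / QK4_0 blocks per row.
    let mut rhs_q = vec![BlockQ4_0::zeros(); n * k / k_quants::QK4_0];
    BlockQ4_0::from_float(&rhs_t, &mut rhs_q)?;

    // Quantized mat-mul: lhs rows are re-quantized to Q8_0 internally, then dotted per column.
    let mut dst = vec![0f32; m * n];
    k_quants::matmul((m, k, n), &lhs, &rhs_q, &mut dst)?;
    println!("{dst:?}");
    Ok(())
}
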
candle-core/src/quantized/mod.rs (new file, 82 lines)
@@ -0,0 +1,82 @@
use crate::Result;

pub mod ggml_file;
pub mod k_quants;

pub use k_quants::GgmlType;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GgmlDType {
    F32,
    F16,
    Q4_0,
    Q4_1,
    Q5_0,
    Q5_1,
    Q8_0,
    Q8_1,
    Q2K,
    Q3K,
    Q4K,
    Q5K,
    Q6K,
    Q8K,
}

impl GgmlDType {
    pub(crate) fn from_u32(u: u32) -> Result<Self> {
        let dtype = match u {
            0 => Self::F32,
            1 => Self::F16,
            2 => Self::Q4_0,
            3 => Self::Q4_1,
            6 => Self::Q5_0,
            7 => Self::Q5_1,
            8 => Self::Q8_0,
            9 => Self::Q8_1,
            10 => Self::Q2K,
            11 => Self::Q3K,
            12 => Self::Q4K,
            13 => Self::Q5K,
            14 => Self::Q6K,
            15 => Self::Q8K,
            _ => crate::bail!("unknown dtype for tensor {u}"),
        };
        Ok(dtype)
    }

    fn type_size(&self) -> usize {
        use k_quants::*;
        match self {
            Self::F32 => 4,
            Self::F16 => 2,
            Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(),
            Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(),
            Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(),
            Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(),
            // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932
            Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(),
            Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(),
            Self::Q2K => std::mem::size_of::<BlockQ2K>(),
            Self::Q3K => std::mem::size_of::<BlockQ3K>(),
            Self::Q4K => std::mem::size_of::<BlockQ4K>(),
            Self::Q5K => std::mem::size_of::<BlockQ5K>(),
            Self::Q6K => std::mem::size_of::<BlockQ6K>(),
            Self::Q8K => std::mem::size_of::<BlockQ8K>(),
        }
    }

    fn blck_size(&self) -> usize {
        match self {
            Self::F32 => 1,
            Self::F16 => 1,
            Self::Q4_0 => k_quants::QK4_0,
            Self::Q4_1 => k_quants::QK4_1,
            Self::Q5_0 => k_quants::QK5_0,
            Self::Q5_1 => k_quants::QK5_1,
            Self::Q8_0 => k_quants::QK8_0,
            Self::Q8_1 => k_quants::QK8_1,
            Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => k_quants::QK_K,
        }
    }
}
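type_size and blck_size together give the storage cost of each format: a tensor with n elements occupies n * type_size / blck_size bytes, which is exactly how ggml_file.rs sizes its raw reads. Both methods are crate-private, but the same numbers can be recovered from the public block types; a small sketch, assuming the same candle_core crate path as above:

use candle_core::quantized::k_quants::{BlockQ4_0, GgmlType, QK4_0};
use candle_core::quantized::GgmlDType;

fn main() {
    // A Q4_0 block packs 32 weights into 18 bytes: a 2-byte f16 scale plus 16 bytes of nibbles.
    assert_eq!(BlockQ4_0::DTYPE, GgmlDType::Q4_0);
    let bytes_per_weight = std::mem::size_of::<BlockQ4_0>() as f64 / QK4_0 as f64;
    println!("Q4_0 uses {bytes_per_weight} bytes per weight"); // 0.5625, vs 4.0 for f32
}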