mirror of https://github.com/huggingface/candle.git (synced 2025-06-17 11:08:52 +00:00)
Split out the quantized file. (#456)
@@ -50,13 +50,13 @@ pub mod display;
 mod dtype;
 mod dummy_cuda_backend;
 pub mod error;
-pub mod ggml;
 mod indexer;
 pub mod layout;
 #[cfg(feature = "mkl")]
 mod mkl;
 pub mod npy;
 mod op;
+pub mod quantized;
 pub mod safetensors;
 pub mod shape;
 mod storage;
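Downstream code follows the rename: everything that used to live under `candle_core::ggml` is now reached through the new `quantized` module (see the test and example hunks at the end of this diff). A minimal sketch of the new import paths, assuming a crate that depends on `candle-core`:

    // Before this commit:
    //   use candle_core::ggml::{Content, BlockQ4_0, GgmlType};
    // After the split:
    use candle_core::quantized::ggml_file::Content; // GGML file loading
    use candle_core::quantized::k_quants::BlockQ4_0; // quantized block types
    use candle_core::quantized::GgmlType; // trait re-exported from k_quants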
candle-core/src/quantized/ggml_file.rs (new file, 294 lines):
//! Support for the GGML file format.

use super::{k_quants, GgmlDType};
use crate::{DType, Device, Result, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};

// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.h#L37
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Magic {
    Ggjt,
    Ggla,
    Ggmf,
    Ggml,
    Ggsn,
}

impl TryFrom<u32> for Magic {
    type Error = crate::Error;
    fn try_from(value: u32) -> Result<Self> {
        let magic = match value {
            0x67676a74 => Self::Ggjt,
            0x67676c61 => Self::Ggla,
            0x67676d66 => Self::Ggmf,
            0x67676d6c => Self::Ggml,
            0x6767736e => Self::Ggsn,
            _ => crate::bail!("unknown magic {value:08x}"),
        };
        Ok(magic)
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VersionedMagic {
    GgmlUnversioned,
    GgmfV1,
    GgjtV1,
    GgjtV2,
    GgjtV3,
}

impl VersionedMagic {
    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
        let magic = reader.read_u32::<LittleEndian>()?;
        let magic = Magic::try_from(magic)?;
        if magic == Magic::Ggml {
            return Ok(Self::GgmlUnversioned);
        }
        let version = reader.read_u32::<LittleEndian>()?;
        let versioned_magic = match (magic, version) {
            (Magic::Ggmf, 1) => Self::GgmfV1,
            (Magic::Ggjt, 1) => Self::GgjtV1,
            (Magic::Ggjt, 2) => Self::GgjtV2,
            (Magic::Ggjt, 3) => Self::GgjtV3,
            _ => crate::bail!("ggml: unsupported magic/version {magic:?}/{version}"),
        };
        Ok(versioned_magic)
    }

    fn align32(&self) -> bool {
        match self {
            Self::GgmlUnversioned | Self::GgmfV1 => false,
            Self::GgjtV1 | Self::GgjtV2 | Self::GgjtV3 => true,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HParams {
    pub n_vocab: u32,
    pub n_embd: u32,
    pub n_mult: u32,
    pub n_head: u32,
    pub n_layer: u32,
    pub n_rot: u32,
    pub ftype: u32,
}

impl HParams {
    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
        let n_vocab = reader.read_u32::<LittleEndian>()?;
        let n_embd = reader.read_u32::<LittleEndian>()?;
        let n_mult = reader.read_u32::<LittleEndian>()?;
        let n_head = reader.read_u32::<LittleEndian>()?;
        let n_layer = reader.read_u32::<LittleEndian>()?;
        let n_rot = reader.read_u32::<LittleEndian>()?;
        let ftype = reader.read_u32::<LittleEndian>()?;
        Ok(Self {
            n_vocab,
            n_embd,
            n_mult,
            n_head,
            n_layer,
            n_rot,
            ftype,
        })
    }
}

#[derive(Debug, Clone, PartialEq)]
pub struct Vocab {
    pub token_score_pairs: Vec<(Vec<u8>, f32)>,
}

impl Vocab {
    fn read<R: std::io::Read>(reader: &mut R, n_vocab: usize) -> Result<Self> {
        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L556
        let mut token_score_pairs = Vec::with_capacity(n_vocab);
        for _index in 0..n_vocab {
            let len = reader.read_u32::<LittleEndian>()? as usize;
            let mut word = vec![0u8; len];
            reader.read_exact(&mut word)?;
            let score = reader.read_f32::<LittleEndian>()?;
            token_score_pairs.push((word, score))
        }
        Ok(Self { token_score_pairs })
    }
}

fn dequantize_and_create_tensor<T: super::GgmlType>(
    raw_data: &[u8],
    tensor_elems: usize,
    size_in_bytes: usize,
    dims: Vec<usize>,
    device: &Device,
) -> Result<Tensor> {
    let mut f32_data = vec![0f32; tensor_elems];
    let raw_data_ptr = raw_data.as_ptr();
    let n_blocks = size_in_bytes / std::mem::size_of::<T>();
    let raw_data = unsafe { std::slice::from_raw_parts(raw_data_ptr as *const T, n_blocks) };
    T::to_float(raw_data, &mut f32_data)?;
    Tensor::from_vec(f32_data, dims, device)
}

/// Creates a [Tensor] from a raw GGML tensor.
pub fn tensor_from_ggml(
    ggml_dtype: GgmlDType,
    raw_data: &[u8],
    dims: Vec<usize>,
    dtype: DType,
    device: &Device,
) -> Result<Tensor> {
    let tensor_elems = dims.iter().product::<usize>();
    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();

    let tensor = match ggml_dtype {
        GgmlDType::F32 => Tensor::from_raw_buffer(raw_data, DType::F32, &dims, device),
        GgmlDType::F16 => Tensor::from_raw_buffer(raw_data, DType::F16, &dims, device),
        GgmlDType::Q4_0 => dequantize_and_create_tensor::<k_quants::BlockQ4_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q4_1 => dequantize_and_create_tensor::<k_quants::BlockQ4_1>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5_0 => dequantize_and_create_tensor::<k_quants::BlockQ5_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5_1 => dequantize_and_create_tensor::<k_quants::BlockQ5_1>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q8_0 => dequantize_and_create_tensor::<k_quants::BlockQ8_0>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q2K => dequantize_and_create_tensor::<k_quants::BlockQ2K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q3K => dequantize_and_create_tensor::<k_quants::BlockQ3K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q4K => dequantize_and_create_tensor::<k_quants::BlockQ4K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q5K => dequantize_and_create_tensor::<k_quants::BlockQ5K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        GgmlDType::Q6K => dequantize_and_create_tensor::<k_quants::BlockQ6K>(
            raw_data,
            tensor_elems,
            size_in_bytes,
            dims,
            device,
        ),
        _ => crate::bail!("quantized type {dtype:?} is not supported yet"),
    }?;
    //We only have ggml-quant to f32 conversions, meaning we have to convert to the desired type
    if tensor.dtype() != dtype {
        tensor.to_dtype(dtype)
    } else {
        Ok(tensor)
    }
}

fn read_one_tensor<R: std::io::Seek + std::io::Read>(
    reader: &mut R,
    magic: VersionedMagic,
    dtype: DType,
    device: &Device,
) -> Result<(String, Tensor)> {
    let n_dims = reader.read_u32::<LittleEndian>()?;
    let name_len = reader.read_u32::<LittleEndian>()?;
    let ggml_dtype = reader.read_u32::<LittleEndian>()?;
    let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?;
    let mut dims = vec![0u32; n_dims as usize];
    reader.read_u32_into::<LittleEndian>(&mut dims)?;
    let mut name = vec![0u8; name_len as usize];
    reader.read_exact(&mut name)?;
    let name = String::from_utf8_lossy(&name).into_owned();

    if magic.align32() {
        let pos = reader.stream_position()?;
        reader.seek(std::io::SeekFrom::Current(((32 - pos % 32) % 32) as i64))?;
    }
    let dims = dims.iter().map(|&u| u as usize).collect::<Vec<_>>();
    let tensor_elems = dims.iter().product::<usize>();
    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();
    println!("{name} {ggml_dtype:?} {dims:?}");
    // TODO: Mmap version to avoid copying the data around?
    let mut raw_data = vec![0u8; size_in_bytes];
    reader.read_exact(&mut raw_data)?;
    match tensor_from_ggml(ggml_dtype, &raw_data, dims, dtype, device) {
        Ok(tensor) => Ok((name, tensor)),
        Err(e) => crate::bail!("Error creating tensor {name}: {e}"),
    }
}

#[derive(Debug)]
pub struct Content {
    pub magic: VersionedMagic,
    pub hparams: HParams,
    pub vocab: Vocab,
    pub tensors: Vec<(String, Tensor)>,
}

impl Content {
    pub fn read<R: std::io::Seek + std::io::Read>(
        reader: &mut R,
        dtype: DType,
        device: &Device,
    ) -> Result<Content> {
        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L505
        let last_position = reader.seek(std::io::SeekFrom::End(0))?;
        reader.seek(std::io::SeekFrom::Start(0))?;
        let magic = VersionedMagic::read(reader)?;
        let hparams = HParams::read(reader)?;
        let vocab = Vocab::read(reader, hparams.n_vocab as usize)?;
        let mut tensors = vec![];

        while reader.stream_position()? != last_position {
            let (name, tensor) = read_one_tensor(reader, magic, dtype, device)?;
            tensors.push((name, tensor))
        }
        Ok(Self {
            magic,
            hparams,
            vocab,
            tensors,
        })
    }
}
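A hedged usage sketch of the new loader: `Content::read` works on any `std::io::Read + Seek` source, so a plain `File` is enough. The path and the `anyhow` error handling here are illustrative, not part of the commit; the same pattern appears in the example binary updated at the bottom of this diff:

    use candle_core::quantized::ggml_file::Content;
    use candle_core::{DType, Device};

    fn load_ggml(path: &str) -> anyhow::Result<()> {
        let mut file = std::fs::File::open(path)?; // e.g. a llama.cpp GGJT checkpoint
        let model = Content::read(&mut file, DType::F32, &Device::Cpu)?;
        println!("magic: {:?} n_vocab: {}", model.magic, model.hparams.n_vocab);
        println!(
            "{} tensors, {} vocab entries",
            model.tensors.len(),
            model.vocab.token_score_pairs.len()
        );
        Ok(())
    }

The next two hunks trim the old single-file implementation down to the quantization kernels; everything shown as removed below now lives in ggml_file.rs and mod.rs.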
@@ -1,7 +1,5 @@
-//! Support for the GGML file format.
-use crate::{DType, Device, Result, Tensor};
-use byteorder::{LittleEndian, ReadBytesExt};
+use super::GgmlDType;
+use crate::Result;
 use half::f16;
 
 // Default to QK_K 256 rather than 64.
@@ -728,367 +726,3 @@ pub fn matmul<T: GgmlType>(
     }
     Ok(())
 }
-
-// https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.h#L37
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-enum Magic {
-    Ggjt,
-    Ggla,
-    Ggmf,
-    Ggml,
-    Ggsn,
-}
-
-impl TryFrom<u32> for Magic {
-    type Error = crate::Error;
-    fn try_from(value: u32) -> Result<Self> {
-        let magic = match value {
-            0x67676a74 => Self::Ggjt,
-            0x67676c61 => Self::Ggla,
-            0x67676d66 => Self::Ggmf,
-            0x67676d6c => Self::Ggml,
-            0x6767736e => Self::Ggsn,
-            _ => crate::bail!("unknown magic {value:08x}"),
-        };
-        Ok(magic)
-    }
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum VersionedMagic {
-    GgmlUnversioned,
-    GgmfV1,
-    GgjtV1,
-    GgjtV2,
-    GgjtV3,
-}
-
-impl VersionedMagic {
-    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
-        let magic = reader.read_u32::<LittleEndian>()?;
-        let magic = Magic::try_from(magic)?;
-        if magic == Magic::Ggml {
-            return Ok(Self::GgmlUnversioned);
-        }
-        let version = reader.read_u32::<LittleEndian>()?;
-        let versioned_magic = match (magic, version) {
-            (Magic::Ggmf, 1) => Self::GgmfV1,
-            (Magic::Ggjt, 1) => Self::GgjtV1,
-            (Magic::Ggjt, 2) => Self::GgjtV2,
-            (Magic::Ggjt, 3) => Self::GgjtV3,
-            _ => crate::bail!("ggml: unsupported magic/version {magic:?}/{version}"),
-        };
-        Ok(versioned_magic)
-    }
-
-    fn align32(&self) -> bool {
-        match self {
-            Self::GgmlUnversioned | Self::GgmfV1 => false,
-            Self::GgjtV1 | Self::GgjtV2 | Self::GgjtV3 => true,
-        }
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct HParams {
-    pub n_vocab: u32,
-    pub n_embd: u32,
-    pub n_mult: u32,
-    pub n_head: u32,
-    pub n_layer: u32,
-    pub n_rot: u32,
-    pub ftype: u32,
-}
-
-impl HParams {
-    fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> {
-        let n_vocab = reader.read_u32::<LittleEndian>()?;
-        let n_embd = reader.read_u32::<LittleEndian>()?;
-        let n_mult = reader.read_u32::<LittleEndian>()?;
-        let n_head = reader.read_u32::<LittleEndian>()?;
-        let n_layer = reader.read_u32::<LittleEndian>()?;
-        let n_rot = reader.read_u32::<LittleEndian>()?;
-        let ftype = reader.read_u32::<LittleEndian>()?;
-        Ok(Self {
-            n_vocab,
-            n_embd,
-            n_mult,
-            n_head,
-            n_layer,
-            n_rot,
-            ftype,
-        })
-    }
-}
-
-#[derive(Debug, Clone, PartialEq)]
-pub struct Vocab {
-    pub token_score_pairs: Vec<(Vec<u8>, f32)>,
-}
-
-impl Vocab {
-    fn read<R: std::io::Read>(reader: &mut R, n_vocab: usize) -> Result<Self> {
-        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L556
-        let mut token_score_pairs = Vec::with_capacity(n_vocab);
-        for _index in 0..n_vocab {
-            let len = reader.read_u32::<LittleEndian>()? as usize;
-            let mut word = vec![0u8; len];
-            reader.read_exact(&mut word)?;
-            let score = reader.read_f32::<LittleEndian>()?;
-            token_score_pairs.push((word, score))
-        }
-        Ok(Self { token_score_pairs })
-    }
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum GgmlDType {
-    F32,
-    F16,
-    Q4_0,
-    Q4_1,
-    Q5_0,
-    Q5_1,
-    Q8_0,
-    Q8_1,
-    Q2K,
-    Q3K,
-    Q4K,
-    Q5K,
-    Q6K,
-    Q8K,
-}
-
-impl GgmlDType {
-    fn from_u32(u: u32) -> Result<Self> {
-        let dtype = match u {
-            0 => Self::F32,
-            1 => Self::F16,
-            2 => Self::Q4_0,
-            3 => Self::Q4_1,
-            6 => Self::Q5_0,
-            7 => Self::Q5_1,
-            8 => Self::Q8_0,
-            9 => Self::Q8_1,
-            10 => Self::Q2K,
-            11 => Self::Q3K,
-            12 => Self::Q4K,
-            13 => Self::Q5K,
-            14 => Self::Q6K,
-            15 => Self::Q8K,
-            _ => crate::bail!("unknown dtype for tensor {u}"),
-        };
-        Ok(dtype)
-    }
-
-    fn type_size(&self) -> usize {
-        match self {
-            Self::F32 => 4,
-            Self::F16 => 2,
-            Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(),
-            Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(),
-            Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(),
-            Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(),
-            // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932
-            Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(),
-            Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(),
-            Self::Q2K => std::mem::size_of::<BlockQ2K>(),
-            Self::Q3K => std::mem::size_of::<BlockQ3K>(),
-            Self::Q4K => std::mem::size_of::<BlockQ4K>(),
-            Self::Q5K => std::mem::size_of::<BlockQ5K>(),
-            Self::Q6K => std::mem::size_of::<BlockQ6K>(),
-            Self::Q8K => std::mem::size_of::<BlockQ8K>(),
-        }
-    }
-
-    fn blck_size(&self) -> usize {
-        match self {
-            Self::F32 => 1,
-            Self::F16 => 1,
-            Self::Q4_0 => QK4_0,
-            Self::Q4_1 => QK4_1,
-            Self::Q5_0 => QK5_0,
-            Self::Q5_1 => QK5_1,
-            Self::Q8_0 => QK8_0,
-            Self::Q8_1 => QK8_1,
-            Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => QK_K,
-        }
-    }
-}
-
-fn dequantize_and_create_tensor<T: GgmlType>(
-    raw_data: &[u8],
-    tensor_elems: usize,
-    size_in_bytes: usize,
-    dims: Vec<usize>,
-    device: &Device,
-) -> Result<Tensor> {
-    let mut f32_data = vec![0f32; tensor_elems];
-    let raw_data_ptr = raw_data.as_ptr();
-    let n_blocks = size_in_bytes / std::mem::size_of::<T>();
-    let raw_data = unsafe { std::slice::from_raw_parts(raw_data_ptr as *const T, n_blocks) };
-    T::to_float(raw_data, &mut f32_data)?;
-    Tensor::from_vec(f32_data, dims, device)
-}
-
-/// Creates a [Tensor] from a raw GGML tensor.
-pub fn tensor_from_ggml(
-    ggml_dtype: GgmlDType,
-    raw_data: &[u8],
-    dims: Vec<usize>,
-    dtype: DType,
-    device: &Device,
-) -> Result<Tensor> {
-    let tensor_elems = dims.iter().product::<usize>();
-    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();
-
-    let tensor = match ggml_dtype {
-        GgmlDType::F32 => Tensor::from_raw_buffer(raw_data, DType::F32, &dims, device),
-        GgmlDType::F16 => Tensor::from_raw_buffer(raw_data, DType::F16, &dims, device),
-        GgmlDType::Q4_0 => dequantize_and_create_tensor::<BlockQ4_0>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q4_1 => dequantize_and_create_tensor::<BlockQ4_1>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q5_0 => dequantize_and_create_tensor::<BlockQ5_0>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q5_1 => dequantize_and_create_tensor::<BlockQ5_1>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q8_0 => dequantize_and_create_tensor::<BlockQ8_0>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q2K => dequantize_and_create_tensor::<BlockQ2K>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q3K => dequantize_and_create_tensor::<BlockQ3K>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q4K => dequantize_and_create_tensor::<BlockQ4K>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q5K => dequantize_and_create_tensor::<BlockQ5K>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        GgmlDType::Q6K => dequantize_and_create_tensor::<BlockQ6K>(
-            raw_data,
-            tensor_elems,
-            size_in_bytes,
-            dims,
-            device,
-        ),
-        _ => crate::bail!("quantized type {dtype:?} is not supported yet"),
-    }?;
-    //We only have ggml-quant to f32 conversions, meaning we have to convert to the desired type
-    if tensor.dtype() != dtype {
-        tensor.to_dtype(dtype)
-    } else {
-        Ok(tensor)
-    }
-}
-
-fn read_one_tensor<R: std::io::Seek + std::io::Read>(
-    reader: &mut R,
-    magic: VersionedMagic,
-    dtype: DType,
-    device: &Device,
-) -> Result<(String, Tensor)> {
-    let n_dims = reader.read_u32::<LittleEndian>()?;
-    let name_len = reader.read_u32::<LittleEndian>()?;
-    let ggml_dtype = reader.read_u32::<LittleEndian>()?;
-    let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?;
-    let mut dims = vec![0u32; n_dims as usize];
-    reader.read_u32_into::<LittleEndian>(&mut dims)?;
-    let mut name = vec![0u8; name_len as usize];
-    reader.read_exact(&mut name)?;
-    let name = String::from_utf8_lossy(&name).into_owned();
-
-    if magic.align32() {
-        let pos = reader.stream_position()?;
-        reader.seek(std::io::SeekFrom::Current(((32 - pos % 32) % 32) as i64))?;
-    }
-    let dims = dims.iter().map(|&u| u as usize).collect::<Vec<_>>();
-    let tensor_elems = dims.iter().product::<usize>();
-    let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size();
-    println!("{name} {ggml_dtype:?} {dims:?}");
-    // TODO: Mmap version to avoid copying the data around?
-    let mut raw_data = vec![0u8; size_in_bytes];
-    reader.read_exact(&mut raw_data)?;
-    match tensor_from_ggml(ggml_dtype, &raw_data, dims, dtype, device) {
-        Ok(tensor) => Ok((name, tensor)),
-        Err(e) => crate::bail!("Error creating tensor {name}: {e}"),
-    }
-}
-
-#[derive(Debug)]
-pub struct Content {
-    pub magic: VersionedMagic,
-    pub hparams: HParams,
-    pub vocab: Vocab,
-    pub tensors: Vec<(String, Tensor)>,
-}
-
-impl Content {
-    pub fn read<R: std::io::Seek + std::io::Read>(
-        reader: &mut R,
-        dtype: DType,
-        device: &Device,
-    ) -> Result<Content> {
-        // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L505
-        let last_position = reader.seek(std::io::SeekFrom::End(0))?;
-        reader.seek(std::io::SeekFrom::Start(0))?;
-        let magic = VersionedMagic::read(reader)?;
-        let hparams = HParams::read(reader)?;
-        let vocab = Vocab::read(reader, hparams.n_vocab as usize)?;
-        let mut tensors = vec![];
-
-        while reader.stream_position()? != last_position {
-            let (name, tensor) = read_one_tensor(reader, magic, dtype, device)?;
-            tensors.push((name, tensor))
-        }
-        Ok(Self {
-            magic,
-            hparams,
-            vocab,
-            tensors,
-        })
-    }
-}
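One detail worth a worked example: for the GGJT variants, `align32` makes `read_one_tensor` skip forward so each tensor's raw data starts on a 32-byte boundary. The padding expression `(32 - pos % 32) % 32` handles the already-aligned case via the outer modulo. A small sketch with an assumed stream position:

    let pos: u64 = 1234; // hypothetical offset right after a tensor header
    let padding = (32 - pos % 32) % 32; // 1234 % 32 == 18, so padding == 14
    assert_eq!((pos + padding) % 32, 0); // data starts at 1248, a multiple of 32
    assert_eq!((32u64 - 0 % 32) % 32, 0); // an aligned position needs no padding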
candle-core/src/quantized/mod.rs (new file, 82 lines):
use crate::Result;

pub mod ggml_file;
pub mod k_quants;

pub use k_quants::GgmlType;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GgmlDType {
    F32,
    F16,
    Q4_0,
    Q4_1,
    Q5_0,
    Q5_1,
    Q8_0,
    Q8_1,
    Q2K,
    Q3K,
    Q4K,
    Q5K,
    Q6K,
    Q8K,
}

impl GgmlDType {
    pub(crate) fn from_u32(u: u32) -> Result<Self> {
        let dtype = match u {
            0 => Self::F32,
            1 => Self::F16,
            2 => Self::Q4_0,
            3 => Self::Q4_1,
            6 => Self::Q5_0,
            7 => Self::Q5_1,
            8 => Self::Q8_0,
            9 => Self::Q8_1,
            10 => Self::Q2K,
            11 => Self::Q3K,
            12 => Self::Q4K,
            13 => Self::Q5K,
            14 => Self::Q6K,
            15 => Self::Q8K,
            _ => crate::bail!("unknown dtype for tensor {u}"),
        };
        Ok(dtype)
    }

    fn type_size(&self) -> usize {
        use k_quants::*;
        match self {
            Self::F32 => 4,
            Self::F16 => 2,
            Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(),
            Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(),
            Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(),
            Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(),
            // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932
            Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(),
            Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(),
            Self::Q2K => std::mem::size_of::<BlockQ2K>(),
            Self::Q3K => std::mem::size_of::<BlockQ3K>(),
            Self::Q4K => std::mem::size_of::<BlockQ4K>(),
            Self::Q5K => std::mem::size_of::<BlockQ5K>(),
            Self::Q6K => std::mem::size_of::<BlockQ6K>(),
            Self::Q8K => std::mem::size_of::<BlockQ8K>(),
        }
    }

    fn blck_size(&self) -> usize {
        match self {
            Self::F32 => 1,
            Self::F16 => 1,
            Self::Q4_0 => k_quants::QK4_0,
            Self::Q4_1 => k_quants::QK4_1,
            Self::Q5_0 => k_quants::QK5_0,
            Self::Q5_1 => k_quants::QK5_1,
            Self::Q8_0 => k_quants::QK8_0,
            Self::Q8_1 => k_quants::QK8_1,
            Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => k_quants::QK_K,
        }
    }
}
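`type_size` and `blck_size` together give the on-disk size used by the readers: `size_in_bytes = tensor_elems * type_size / blck_size`. A worked example under the usual GGML `Q4_0` layout, where one block covers 32 elements (`QK4_0`) and an 18-byte block size is assumed (one `f16` scale plus 32 packed 4-bit values; the real value is whatever `std::mem::size_of::<BlockQ4_0>()` reports):

    let tensor_elems: usize = 4096 * 4096; // e.g. a 4096x4096 weight matrix
    let type_size: usize = 18; // assumed size_of::<BlockQ4_0>()
    let blck_size: usize = 32; // k_quants::QK4_0
    let size_in_bytes = tensor_elems * type_size / blck_size;
    assert_eq!(size_in_bytes, 9_437_184); // ~9 MiB, versus 64 MiB stored as f32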
@@ -1,18 +1,18 @@
-use candle_core::{ggml, Device, Result, Tensor};
-use ggml::GgmlType;
+use candle_core::{quantized, Device, Result, Tensor};
+use quantized::{k_quants, GgmlType};
 
 #[test]
-fn ggml_matmul() -> Result<()> {
+fn quantized_matmul() -> Result<()> {
     let cpu = &Device::Cpu;
     let (m, k, n) = (3, 64, 4);
     let lhs = (0..(m * k)).map(|v| v as f32).collect::<Vec<_>>();
     let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?;
     let mut dst = vec![42.; 3 * 4];
-    let mut rhs_t = vec![ggml::BlockQ4_0::zeros(); 8];
+    let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8];
     let rhs = (0..(k * n)).map(|v| v as f32).collect::<Vec<_>>();
     let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?;
-    ggml::BlockQ4_0::from_float(&rhs, &mut rhs_t)?;
-    ggml::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
+    k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?;
+    k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
     assert_eq!(
         dst,
         &[
@@ -2,7 +2,7 @@ use anyhow::Result;
 use clap::Parser;
 use std::fs::File;
 
-use candle::ggml::Content;
+use candle::quantized::ggml_file::Content;
 use candle::{DType, Device};
 
 #[derive(Parser, Debug)]