Mirror of https://github.com/huggingface/candle.git
Add a matmul cuda example.
@@ -1,5 +1,5 @@
 use anyhow::Result;
-use candle::{Device, Tensor};
+use candle::{DType, Device, Tensor};
 
 fn main() -> Result<()> {
     let device = Device::new_cuda(0)?;
@@ -9,7 +9,10 @@ fn main() -> Result<()> {
     let z = (y + x * 3.)?;
     println!("{:?}", z.to_vec1::<f32>()?);
     println!("{:?}", z.sqrt()?.to_vec1::<f32>()?);
-    println!("{:?}", x.to_vec2::<f32>()?);
-    let x = Tensor::new(&[[11f32, 22.], [33., 44.], [55., 66.], [77., 78.]], &device)?;
+    let x = Tensor::ones((3, 2), DType::F32, &device)?;
+    let y = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &device)?;
+    println!("{:?}", y.to_vec2::<f32>()?);
+    let z = x.matmul(&y)?;
+    println!("{:?}", z.to_vec2::<f32>()?);
     Ok(())
 }
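The example multiplies a (3, 2) tensor of ones by an explicit (2, 3) matrix, so the result is (3, 3) with every row equal to [5, 7, 9]. A minimal sketch of the same computation on the CPU device, assuming the crate is used as `candle` exactly as in the imports above and that the CPU backend also provides `matmul`:

use anyhow::Result;
use candle::{DType, Device, Tensor};

fn main() -> Result<()> {
    // Same shapes as the CUDA example, but on the CPU device.
    let device = Device::Cpu;
    let x = Tensor::ones((3, 2), DType::F32, &device)?;
    let y = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &device)?;
    // (3, 2) x (2, 3) -> (3, 3); each row is [1 + 4, 2 + 5, 3 + 6] = [5, 7, 9].
    let z = x.matmul(&y)?;
    println!("{:?}", z.to_vec2::<f32>()?);
    Ok(())
}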
@@ -159,14 +159,15 @@ fn gemm_config<T>(
 ) -> StridedBatchedConfig<T> {
     // https://docs.nvidia.com/cuda/cublas/index.html#cublas-t-gemm
     use cudarc::cublas::sys::cublasOperation_t;
+    println!("{:?} {:?} {:?}", lhs_stride, rhs_stride, (b, m, n, k));
     let gemm = GemmConfig {
         alpha,
         beta,
         m: m as i32,
         n: n as i32,
         k: k as i32,
-        lda: m as i32,
-        ldb: k as i32,
+        lda: lhs_stride[lhs_stride.len() - 2] as i32,
+        ldb: rhs_stride[rhs_stride.len() - 2] as i32,
         ldc: m as i32,
         transa: cublasOperation_t::CUBLAS_OP_N,
         transb: cublasOperation_t::CUBLAS_OP_N,
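For contiguous row-major operands, the stride-based lda/ldb expressions in the hunk above reduce to plain dimensions: a (b, m, k) lhs has strides (m * k, k, 1), so lhs_stride[lhs_stride.len() - 2] is k, and a (b, k, n) rhs has strides (k * n, n, 1), so the corresponding value is n. A standalone sketch of that arithmetic, using hypothetical helper functions that are not part of the diff:

// Hypothetical helpers, not from the diff: build contiguous row-major strides and
// read the second-to-last one, which is what the stride-based lda/ldb expressions use.
fn contiguous_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![1; shape.len()];
    for i in (0..shape.len().saturating_sub(1)).rev() {
        strides[i] = strides[i + 1] * shape[i + 1];
    }
    strides
}

fn leading_dim(stride: &[usize]) -> usize {
    stride[stride.len() - 2]
}

fn main() {
    let (b, m, n, k) = (2usize, 3, 5, 4);
    let lhs_stride = contiguous_strides(&[b, m, k]); // [m * k, k, 1]
    let rhs_stride = contiguous_strides(&[b, k, n]); // [k * n, n, 1]
    assert_eq!(leading_dim(&lhs_stride), k); // what lhs_stride[len - 2] evaluates to
    assert_eq!(leading_dim(&rhs_stride), n); // what rhs_stride[len - 2] evaluates to
    println!("lda = {}, ldb = {}", leading_dim(&lhs_stride), leading_dim(&rhs_stride));
}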
@@ -174,8 +175,8 @@ fn gemm_config<T>(
     StridedBatchedConfig {
         batch_size: b as i32,
         gemm,
-        stride_a: (m * k) as i64,
-        stride_b: (n * k) as i64,
+        stride_a: lhs_stride[0] as i64,
+        stride_b: rhs_stride[0] as i64,
         stride_c: (m * n * k) as i64,
     }
 }
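The batch strides are the element distance between consecutive matrices in the batch; for contiguous operands lhs_stride[0] is m * k and rhs_stride[0] is k * n, which agrees with the (m * k) and (n * k) constants also appearing in this hunk. A small illustration using a hypothetical offset helper, not taken from the diff:

// Hypothetical illustration, not from the diff: the batch stride of a contiguous
// (b, m, k) tensor is m * k, i.e. one full matrix per batch element, and element
// (batch, row, col) lives at batch * stride[0] + row * stride[1] + col * stride[2].
fn main() {
    let (b, m, k) = (2usize, 3, 4);
    // Flat buffer where each value equals its own flat index, shape (b, m, k).
    let data: Vec<usize> = (0..b * m * k).collect();
    let lhs_stride = [m * k, k, 1];
    let offset =
        |bi: usize, i: usize, j: usize| bi * lhs_stride[0] + i * lhs_stride[1] + j * lhs_stride[2];
    // The second matrix in the batch starts exactly one m * k block further on.
    assert_eq!(offset(1, 0, 0), m * k);
    assert_eq!(data[offset(1, 2, 3)], m * k + 2 * k + 3);
    println!("stride_a = {}", lhs_stride[0]);
}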