Mirror of https://github.com/huggingface/candle.git, synced 2025-06-15 02:16:37 +00:00

* Some first `Module` implementations
* Add `state_dict` and `load_state_dict` functionality
* Move modules around and create `candle.nn.Linear`
* Add `nn.Embedding` and `nn.LayerNorm`
* Add a BERT implementation
* Batch q-matmul
* Automatically dequantize `QTensor`s if a `Tensor` is expected
* Add Module `.to()`, `.cuda()`, `.cpu()` and `.type()` functionality
* Unit tests for `Module`, `Tensor` and `candle.utils`
* Add PyTorch-like slicing to `Tensor`
* Cleanup and BERT fixes
* `black` formatting + unit test for `nn.Linear`
* Refactor the slicing implementation
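As a rough illustration of the `candle.nn` surface listed above, the sketch below builds a tiny `Module` around `nn.Linear` and `nn.LayerNorm`, round-trips its weights through `state_dict`/`load_state_dict`, and tries the PyTorch-like slicing. This is a minimal sketch, not code from the bindings: the constructor signatures, the import path, automatic submodule registration and the `__call__` → `forward` convention are all assumed by analogy with PyTorch.

# Hedged sketch: signatures and the call convention are assumed to mirror
# PyTorch, not taken verbatim from the candle-pyo3 bindings.
import candle
from candle import nn  # import path assumed


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(256, 64)  # assumed Linear(in_features, out_features)
        self.norm = nn.LayerNorm(64)    # assumed LayerNorm(normalized_shape)

    def forward(self, x):
        return self.norm(self.proj(x))


model = TinyModel()

# Round-trip the parameters through the new state-dict API.
weights = model.state_dict()
model.load_state_dict(weights)

x = candle.randn((8, 256))
y = model(x)  # assumes Module.__call__ dispatches to forward
print(y.shape)

# PyTorch-like slicing on the result (syntax assumed to mirror PyTorch).
print(y[0])
print(y[:, :4])

# Device / dtype helpers mentioned in the change log, commented out since
# they need the matching hardware or dtype support.
# model.cpu()
# model.cuda()            # requires a CUDA-enabled build
# model.type(candle.f16)  # dtype name assumed

The repository's short demo below sticks to the `Tensor` API itself.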
import candle

# Scalar tensor: basic construction and arithmetic.
t = candle.Tensor(42.0)
print(t)
print(t.shape, t.rank, t.device)
print(t + t)

# 1D tensor built from a Python list.
t = candle.Tensor([3.0, 1, 4, 1, 5, 9, 2, 6])
print(t)
print(t + t)

# Reshape to 2x4 and multiply by its transpose.
t = t.reshape([2, 4])
print(t.matmul(t.t()))

# Dtype conversion, by dtype object or by name.
print(t.to_dtype(candle.u8))
print(t.to_dtype("u8"))

# Random normal tensor.
t = candle.randn((5, 3))
print(t)
print(t.dtype)

# Quantize to q6k, dequantize, and report the mean squared round-trip error.
t = candle.randn((16, 256))
quant_t = t.quantize("q6k")
dequant_t = quant_t.dequantize()
diff2 = (t - dequant_t).sqr()
print(diff2.mean_all())