Mirror of https://github.com/huggingface/candle.git
Add training for the llama2.c example (#296)
* Rework the commands and run inference by default.
* Add the training module and load the training dataset.
* Random dataset iterator.
* Proper valid-loss computation.
* Compute the evaluation loss.
* Add more substance to the training loop.
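The "random dataset iterator" mentioned above is not shown on this page; a llama2.c-style pretokenized dataset is typically sampled by cutting random windows out of a flat token buffer, with the target window shifted one token to the right. Below is a minimal sketch of that idea (hypothetical names, not the commit's actual code; it only assumes the `rand` crate already listed in the dev-dependencies):

use rand::Rng;

// Endlessly yields random (input, target) windows from a flat token
// buffer, the usual way a llama2.c-style pretokenized dataset is sampled.
struct RandomDatasetIter<'a> {
    tokens: &'a [u32],
    seq_len: usize,
    rng: rand::rngs::ThreadRng,
}

impl<'a> Iterator for RandomDatasetIter<'a> {
    // An input window and its targets: the same window shifted by one token.
    type Item = (Vec<u32>, Vec<u32>);

    fn next(&mut self) -> Option<Self::Item> {
        // A sample needs seq_len inputs plus one extra token for the shifted targets.
        let max_start = self.tokens.len().checked_sub(self.seq_len + 1)?;
        let start = self.rng.gen_range(0..=max_start);
        let input = self.tokens[start..start + self.seq_len].to_vec();
        let target = self.tokens[start + 1..=start + self.seq_len].to_vec();
        Some((input, target))
    }
}

fn main() {
    let tokens: Vec<u32> = (0..64).collect();
    let mut iter = RandomDatasetIter { tokens: &tokens, seq_len: 8, rng: rand::thread_rng() };
    if let Some((x, y)) = iter.next() {
        println!("input:  {x:?}");
        println!("target: {y:?}");
    }
}

The valid-loss and evaluation-loss steps from the commit message would then average a per-batch cross-entropy over a fixed number of windows drawn from this iterator on the held-out split.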
@@ -26,8 +26,9 @@ half = { workspace = true, optional = true }
 [dev-dependencies]
 anyhow = { workspace = true }
 byteorder = { workspace = true }
-hf-hub = { workspace = true}
+clap = { workspace = true }
+hf-hub = { workspace = true }
 memmap2 = { workspace = true }
 rand = { workspace = true }
 tokenizers = { workspace = true, features = ["onig"] }
 tracing = { workspace = true }
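The new `clap` dev-dependency presumably backs the reworked command-line interface ("rework the commands and run inference by default"). A hedged sketch of how such a default-to-inference subcommand layout is commonly wired up with clap's derive API (the `Task` enum and its variant names are assumptions, and the crate's `derive` feature must be enabled):

use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Args {
    #[command(subcommand)]
    task: Option<Task>,
}

#[derive(Subcommand)]
enum Task {
    Inference,
    Train,
}

fn main() {
    // No subcommand falls through to inference, matching
    // "run inference by default" from the commit message.
    match Args::parse().task.unwrap_or(Task::Inference) {
        Task::Inference => println!("running inference"),
        Task::Train => println!("starting training"),
    }
}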