Add a mention to the replit-code model in the readme. (#1121)

This commit is contained in:
Laurent Mazare
2023-10-18 11:27:23 +01:00
committed by GitHub
parent 767a6578f1
commit 63c204c79e
3 changed files with 24 additions and 27 deletions

View File

@@ -67,6 +67,7 @@ We also provide a some command line based examples using state of the art models
 - [Mistral7b-v0.1](./candle-examples/examples/mistral/): a 7b general LLM with
   performance larger than all publicly available 13b models as of 2023-09-28.
 - [StarCoder](./candle-examples/examples/bigcode/): LLM specialized to code generation.
+- [Replit-code-v1.5](./candle-examples/examples/replit-code/): a 3.3b LLM specialized for code completion.
 - [Quantized LLaMA](./candle-examples/examples/quantized/): quantized version of
   the LLaMA model using the same quantization techniques as
   [llama.cpp](https://github.com/ggerganov/llama.cpp).
@@ -155,6 +156,7 @@ If you have an addition to this list, please submit a pull request.
 - Phi v1.5.
 - Mistral 7b v0.1.
 - StableLM-3B-4E1T.
+- Replit-code-v1.5-3B.
 - T5.
 - Bert.
 - Whisper (multi-lingual support).

View File

@@ -9,37 +9,32 @@ in `bfloat16` (so the GPU version will only work on recent nvidia cards).
 ```bash
 cargo run --example replit-code --release -- --prompt 'def fibonacci(n): '
 ```
-This produces the following output which actually doesn't generate the fibonacci
-series properly.
+This produces the following output.
 ```
 def fibonacci(n): # write Fibonacci series up to n
     """Print a Fibonacci series up to n."""
-    assert type(n) == int, "n must be an integer"
-    if (type(fib_list)==None or len==0 ):
-        fib_list = [1]
-    for i in range((len-2)): # start at 2nd element of list and go until end.
-        n += 1
-        print("Fibonacci number",n,"is:",i)
-def main():
-    """Call the functions."""
-    userInput=input('Enter a positive integer: ')
-    fibonacci(userInput)
-if __name__ == '__main__': # only run if this file is called directly.
-    print("This program prints out Fibonacci numbers.")
-    main()
+    a, b = 0, 1
+    while a < n:
+        print(a, end=' ')
+        a, b = b, a+b
+    print()
+
+def fibonacci_loop(n): # write Fibonacci series up to n
+    """Print a Fibonacci series up to n."""
+    result = []
+    a, b = 0, 1
+    while a < n:
+        result.append(a)
+        a, b = b, a+b
+    return result
+
+def fibonacci_generator(n): # write Fibonacci series up to n
+    """Print a Fibonacci series up to n."""
+    a, b = 0, 1
+    while a < n:
+        yield a
+        a, b = b, a+b
 ```

View File

@@ -155,7 +155,7 @@ struct Args {
     tokenizer: Option<String>,

     /// Penalty to be applied for repeating tokens, 1. means no penalty.
-    #[arg(long, default_value_t = 1.1)]
+    #[arg(long, default_value_t = 1.)]
     repeat_penalty: f32,

     /// The context size to consider for the repeat penalty.