Add return types to *.pyi stubs (#880)

* Start generating return types

* Finish tensor type hinting

* Add `save_gguf` to `utils`

* Typehint `quant-llama.py`
Lukas Kreussel
2023-09-17 23:11:01 +02:00
committed by GitHub
parent c2b866172a
commit 03e194123d
9 changed files with 611 additions and 197 deletions

View File

@ -1 +1,5 @@
from .candle import *
from .candle import *
__doc__ = candle.__doc__
if hasattr(candle, "__all__"):
__all__ = candle.__all__

View File

@ -7,7 +7,7 @@ class bf16(DType):
pass
@staticmethod
def cat(tensors: List[Tensor], dim: int):
def cat(tensors: List[Tensor], dim: int) -> Tensor:
"""
Concatenate the tensors across one axis.
"""
@ -26,31 +26,35 @@ class i64(DType):
pass
@staticmethod
def ones(shape: Sequence[int], dtype: Optional[DType] = None, device: Optional[Device] = None):
""" """
def ones(shape: Sequence[int], dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor:
"""
Creates a new tensor filled with ones.
"""
pass
@staticmethod
def rand(shape: Sequence[int], device: Optional[Device] = None):
def rand(shape: Sequence[int], device: Optional[Device] = None) -> Tensor:
"""
Creates a new tensor with random values.
"""
pass
@staticmethod
def randn(shape: Sequence[int], device: Optional[Device] = None):
""" """
def randn(shape: Sequence[int], device: Optional[Device] = None) -> Tensor:
"""
Creates a new tensor with random values from a normal distribution.
"""
pass
@staticmethod
def stack(tensors: List[Tensor], dim: int):
def stack(tensors: List[Tensor], dim: int) -> Tensor:
"""
Stack the tensors along a new axis.
"""
pass
@staticmethod
def tensor(data: _ArrayLike):
def tensor(data: _ArrayLike) -> Tensor:
"""
Creates a new tensor from a Python value. The value can be a scalar or array-like object.
"""
@ -63,186 +67,309 @@ class u8(DType):
pass
@staticmethod
def zeros(shape: Sequence[int], dtype: Optional[DType] = None, device: Optional[Device] = None):
""" """
def zeros(shape: Sequence[int], dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor:
"""
Creates a new tensor filled with zeros.
"""
pass
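A minimal usage sketch of the creation and combination helpers above, based only on the signatures in this stub; the shapes, positional `dim` arguments, and nested-list input are illustrative assumptions.

```python
import candle

# Hypothetical shapes, for illustration only.
a = candle.ones((2, 3))       # tensor filled with ones
b = candle.zeros((2, 3))      # tensor filled with zeros
c = candle.rand((2, 3))       # uniform random values
d = candle.randn((2, 3))      # normally distributed random values
e = candle.tensor([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])  # from a nested Python list

stacked = candle.stack([a, b], 0)  # new leading axis -> shape (2, 2, 3)
joined = candle.cat([a, b], 1)     # concatenation along axis 1 -> shape (2, 6)
print(stacked.shape, joined.shape)
```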
class DType:
pass
"""
A `candle` dtype.
"""
class QTensor:
def dequantize(self):
""" """
"""
A quantized tensor.
"""
def dequantize(self) -> Tensor:
"""
Dequantizes the tensor.
"""
pass
@property
def ggml_dtype(self):
""" """
def ggml_dtype(self) -> str:
"""
Gets the tensor's quantized dtype.
"""
pass
def matmul_t(self, lhs):
""" """
def matmul_t(self, lhs: Tensor) -> Tensor:
"""
Performs a quantized matrix multiplication, with the quantized tensor as the right hand side.
"""
pass
@property
def rank(self):
""" """
def rank(self) -> int:
"""
Gets the rank of the tensor.
"""
pass
@property
def shape(self):
""" """
def shape(self) -> Tuple[int, ...]:
"""
Gets the shape of the tensor.
"""
pass
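A sketch of the quantization round trip described by the `QTensor` stubs; the ggml dtype string (`"q4_0"`) and the shapes are assumptions for illustration.

```python
import candle

# Quantize a dense float tensor ("q4_0" is an assumed dtype string).
t = candle.rand((64, 256))
qt = t.quantize("q4_0")

print(qt.ggml_dtype)   # quantized dtype name
print(qt.rank)         # 2
print(qt.shape)        # (64, 256)

# matmul_t uses the quantized tensor as the (transposed) right-hand side:
# (4, 256) x (64, 256)^T -> (4, 64).
lhs = candle.rand((4, 256))
out = qt.matmul_t(lhs)

# Recover a dense tensor again.
dense = qt.dequantize()
print(out.shape, dense.shape)
```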
class Tensor:
def __init__(data: _ArrayLike):
"""
A `candle` tensor.
"""
def __init__(self, data: _ArrayLike):
pass
def argmax_keepdim(self, dim):
""" """
pass
def argmin_keepdim(self, dim):
""" """
pass
def broadcast_add(self, rhs):
""" """
pass
def broadcast_as(self, shape):
""" """
pass
def broadcast_div(self, rhs):
""" """
pass
def broadcast_left(self, shape):
""" """
pass
def broadcast_mul(self, rhs):
""" """
pass
def broadcast_sub(self, rhs):
""" """
pass
def contiguous(self):
""" """
pass
def copy(self):
""" """
pass
def cos(self):
""" """
pass
def detach(self):
""" """
pass
@property
def device(self):
""" """
pass
@property
def dtype(self):
""" """
pass
def exp(self):
""" """
pass
def flatten_all(self):
""" """
pass
def flatten_from(self, dim):
""" """
pass
def flatten_to(self, dim):
""" """
pass
def get(self, index):
""" """
pass
def index_select(self, rhs, dim):
""" """
pass
def is_contiguous(self):
""" """
pass
def is_fortran_contiguous(self):
""" """
pass
def log(self):
""" """
pass
def matmul(self, rhs):
""" """
pass
def max_keepdim(self, dim):
""" """
pass
def mean_all(self):
""" """
pass
def min_keepdim(self, dim):
""" """
pass
def narrow(self, dim, start, len):
""" """
pass
def powf(self, p):
""" """
pass
def quantize(self, quantized_dtype):
""" """
pass
@property
def rank(self):
""" """
pass
def recip(self):
""" """
pass
def reshape(self, shape):
""" """
pass
@property
def shape(self):
def argmax_keepdim(self, dim: int) -> Tensor:
"""
Gets the tensor shape as a Python tuple.
Returns the indices of the maximum value(s) across the selected dimension.
"""
pass
def sin(self):
""" """
def argmin_keepdim(self, dim: int) -> Tensor:
"""
Returns the indices of the minimum value(s) across the selected dimension.
"""
pass
def sqr(self):
""" """
def broadcast_add(self, rhs: Tensor) -> Tensor:
"""
Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor.
"""
pass
def sqrt(self):
""" """
def broadcast_as(self, shape: Sequence[int]) -> Tensor:
"""
Broadcasts the tensor to the given shape.
"""
pass
def squeeze(self, dim):
""" """
def broadcast_div(self, rhs: Tensor) -> Tensor:
"""
Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor.
"""
pass
def broadcast_left(self, shape: Sequence[int]) -> Tensor:
"""
Broadcasts the tensor to the given shape, adding new dimensions on the left.
"""
pass
def broadcast_mul(self, rhs: Tensor) -> Tensor:
"""
Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor.
"""
pass
def broadcast_sub(self, rhs: Tensor) -> Tensor:
"""
Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor.
"""
pass
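A short sketch of the `broadcast_*` helpers above; the shapes are illustrative and the broadcasting behaviour is taken from the docstrings.

```python
import candle

x = candle.rand((4, 3))   # left-hand side
row = candle.rand((3,))   # right-hand side, broadcast across rows

# Element-wise ops that broadcast `row` up to the shape of `x`.
s = x.broadcast_add(row)
d = x.broadcast_sub(row)
p = x.broadcast_mul(row)
q = x.broadcast_div(row)

# Explicit broadcasting to a target shape.
expanded = row.broadcast_as((4, 3))   # shape (4, 3)
lifted = row.broadcast_left((2, 4))   # new dims added on the left -> (2, 4, 3)
print(s.shape, expanded.shape, lifted.shape)
```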
def contiguous(self) -> Tensor:
"""
Makes the tensor contiguous in memory.
"""
pass
def copy(self) -> Tensor:
"""
Returns a copy of the tensor.
"""
pass
def cos(self) -> Tensor:
"""
Performs the `cos` operation on the tensor.
"""
pass
def detach(self) -> Tensor:
"""
Detach the tensor from the computation graph.
"""
pass
@property
def stride(self):
""" """
def device(self) -> Device:
"""
Gets the tensor's device.
"""
pass
def sum_all(self):
""" """
@property
def dtype(self) -> DType:
"""
Gets the tensor's dtype.
"""
pass
def sum_keepdim(self, dims):
""" """
def exp(self) -> Tensor:
"""
Performs the `exp` operation on the tensor.
"""
pass
def t(self):
""" """
def flatten_all(self) -> Tensor:
"""
Flattens the tensor into a 1D tensor.
"""
pass
def to_device(self, device):
""" """
def flatten_from(self, dim: int) -> Tensor:
"""
Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension.
"""
pass
def to_dtype(self, dtype):
""" """
def flatten_to(self, dim: int) -> Tensor:
"""
Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive).
"""
pass
def transpose(self, dim1, dim2):
""" """
def get(self, index: int) -> Tensor:
"""
Gets the value at the specified index.
"""
pass
def unsqueeze(self, dim):
""" """
def index_select(self, rhs: Tensor, dim: int) -> Tensor:
"""
Select values for the input tensor at the target indexes across the specified dimension.
The `rhs` argument holds the indexes; it is an int tensor with a single dimension.
The output has the same number of dimensions as the `self` input. The target dimension of
the output has the same length as the indexes, and its values are taken from `self` using
those indexes. Other dimensions have the same number of elements as the input tensor.
"""
pass
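A sketch of `index_select`, assuming an index tensor can be built with `candle.tensor` from a list of Python ints and converted with `to_dtype("u32")`; both of those details are assumptions, not part of the stub.

```python
import candle

src = candle.tensor([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0],
                     [7.0, 8.0, 9.0]])

# One-dimensional tensor of indexes (integer dtype handling is assumed).
idx = candle.tensor([2, 0]).to_dtype("u32")

rows = src.index_select(idx, 0)  # rows 2 and 0 -> shape (2, 3)
cols = src.index_select(idx, 1)  # columns 2 and 0 -> shape (3, 2)
print(rows.shape, cols.shape)
```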
def values(self):
def is_contiguous(self) -> bool:
"""
Returns true if the tensor is contiguous in C order.
"""
pass
def is_fortran_contiguous(self) -> bool:
"""
Returns true if the tensor is contiguous in Fortran order.
"""
pass
def log(self) -> Tensor:
"""
Performs the `log` operation on the tensor.
"""
pass
def matmul(self, rhs: Tensor) -> Tensor:
"""
Performs a matrix multiplication between the two tensors.
"""
pass
def max_keepdim(self, dim: int) -> Tensor:
"""
Gathers the maximum value across the selected dimension.
"""
pass
def mean_all(self) -> Tensor:
"""
Returns the mean of the tensor.
"""
pass
def min_keepdim(self, dim: int) -> Tensor:
"""
Gathers the minimum value across the selected dimension.
"""
pass
def narrow(self, dim: int, start: int, len: int) -> Tensor:
"""
Returns a new tensor that is a narrowed version of the input; the dimension `dim`
ranges from `start` to `start + len`.
"""
pass
def powf(self, p: float) -> Tensor:
"""
Performs the `pow` operation on the tensor with the given exponent.
"""
pass
def quantize(self, quantized_dtype: str) -> QTensor:
"""
Quantizes the tensor.
"""
pass
@property
def rank(self) -> int:
"""
Gets the tensor's rank.
"""
pass
def recip(self) -> Tensor:
"""
Gets the reciprocal of the tensor.
"""
pass
def reshape(self, shape: Sequence[int]) -> Tensor:
"""
Reshapes the tensor to the given shape.
"""
pass
@property
def shape(self) -> Tuple[int, ...]:
"""
Gets the tensor's shape.
"""
pass
def sin(self) -> Tensor:
"""
Performs the `sin` operation on the tensor.
"""
pass
def sqr(self) -> Tensor:
"""
Squares the tensor.
"""
pass
def sqrt(self) -> Tensor:
"""
Calculates the square root of the tensor.
"""
pass
def squeeze(self, dim: int) -> Tensor:
"""
Creates a new tensor with the specified dimension removed if its size was one.
"""
pass
@property
def stride(self) -> Tuple[int, ...]:
"""
Gets the tensor's strides.
"""
pass
def sum_all(self) -> Tensor:
"""
Returns the sum of the tensor.
"""
pass
def sum_keepdim(self, dim: Union[int, List[int]]) -> Tensor:
"""
Returns the sum over the given dimension or dimensions, keeping each reduced dimension with a size of one.
"""
pass
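A sketch of the reduction helpers above on a small tensor; the values in the comments follow from the docstrings and are illustrative.

```python
import candle

x = candle.tensor([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])

total = x.sum_all()                # scalar tensor, 21.0
mean = x.mean_all()                # scalar tensor, 3.5
row_sums = x.sum_keepdim(1)        # shape (2, 1)
col_max = x.max_keepdim(0)         # shape (1, 3)
col_argmax = x.argmax_keepdim(0)   # indices of the maxima, shape (1, 3)
print(total.values(), mean.values(), row_sums.shape)
```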
def t(self) -> Tensor:
"""
Transposes the tensor.
"""
pass
def to_device(self, device: Union[str, Device]) -> Tensor:
"""
Move the tensor to a new device.
"""
pass
def to_dtype(self, dtype: Union[str, DType]) -> Tensor:
"""
Convert the tensor to a new dtype.
"""
pass
def transpose(self, dim1: int, dim2: int) -> Tensor:
"""
Returns a tensor that is a transposed version of the input; the given dimensions are swapped.
"""
pass
def unsqueeze(self, dim: int) -> Tensor:
"""
Creates a new tensor with a dimension of size one inserted at the specified position.
"""
pass
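A sketch of the shape-manipulation methods above (`reshape`, `transpose`, `squeeze`, `unsqueeze`, `narrow`, `flatten_all`); the shapes are illustrative.

```python
import candle

x = candle.rand((2, 3, 4))

flat = x.flatten_all()     # shape (24,)
re = x.reshape((6, 4))     # shape (6, 4)
tr = x.transpose(0, 2)     # shape (4, 3, 2)
un = x.unsqueeze(0)        # shape (1, 2, 3, 4)
sq = un.squeeze(0)         # back to shape (2, 3, 4)

# narrow keeps `len` elements of dimension 1 starting at `start` -> shape (2, 2, 4).
win = x.narrow(1, 1, 2)
print(flat.shape, re.shape, tr.shape, un.shape, sq.shape, win.shape)
```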
def values(self) -> _ArrayLike:
"""
Gets the tensor's data as a Python scalar or array-like object.
"""
pass
def where_cond(self, on_true, on_false):
""" """
def where_cond(self, on_true: Tensor, on_false: Tensor) -> Tensor:
"""
Returns a tensor with the same shape as the input tensor; the values are taken from
`on_true` where the input tensor is non-zero, and from `on_false` where the input
tensor is equal to zero.
"""
pass
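A sketch of `where_cond`; using an integer tensor as the condition is an assumption about the accepted dtypes.

```python
import candle

# Non-zero entries of `cond` select from `on_true`, zero entries from `on_false`.
cond = candle.tensor([[0, 1], [1, 0]])
on_true = candle.ones((2, 2))
on_false = candle.zeros((2, 2))

picked = cond.where_cond(on_true, on_false)
print(picked.values())   # expected [[0.0, 1.0], [1.0, 0.0]]
```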

View File

@ -2,18 +2,18 @@
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device
from candle import Tensor, DType
from candle import Tensor, DType, QTensor
@staticmethod
def silu(tensor: Tensor):
def silu(tensor: Tensor) -> Tensor:
"""
Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
"""
pass
@staticmethod
def softmax(tensor: Tensor, dim: int):
def softmax(tensor: Tensor, dim: int) -> Tensor:
"""
Applies the Softmax function to a given tensor.
"""
pass
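A sketch of the two functions in this stub; the import path is an assumption, since the hunk does not show which file it belongs to.

```python
import candle
from candle.nn import silu, softmax  # module path assumed, not shown in the hunk

x = candle.randn((2, 5))

activated = silu(x)       # element-wise x * sigmoid(x)
probs = softmax(x, 1)     # each row sums to one along dimension 1
print(activated.shape, probs.shape)
```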

View File

@ -8,4 +8,5 @@ has_mkl = utils.has_mkl
load_ggml = utils.load_ggml
load_gguf = utils.load_gguf
load_safetensors = utils.load_safetensors
save_gguf = utils.save_gguf
save_safetensors = utils.save_safetensors

View File

@ -2,38 +2,38 @@
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device
from candle import Tensor, DType
from candle import Tensor, DType, QTensor
@staticmethod
def cuda_is_available():
def cuda_is_available() -> bool:
"""
Returns true if the 'cuda' backend is available.
"""
pass
@staticmethod
def get_num_threads():
def get_num_threads() -> int:
"""
Returns the number of threads used by candle.
"""
pass
@staticmethod
def has_accelerate():
def has_accelerate() -> bool:
"""
Returns true if candle was compiled with 'accelerate' support.
"""
pass
@staticmethod
def has_mkl():
def has_mkl() -> bool:
"""
Returns true if candle was compiled with MKL support.
"""
pass
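A sketch of the capability helpers above; the `"cuda"`/`"cpu"` device strings are assumptions for illustration.

```python
from candle.utils import cuda_is_available, get_num_threads, has_accelerate, has_mkl

# Choose a device string based on what this build supports.
device = "cuda" if cuda_is_available() else "cpu"

print(f"device: {device}")
print(f"threads: {get_num_threads()}")
print(f"accelerate: {has_accelerate()}, mkl: {has_mkl()}")
```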
@staticmethod
def load_ggml(path: Union[str, PathLike]):
def load_ggml(path: Union[str, PathLike]) -> Tuple[Dict[str, QTensor], Dict[str, Any], List[str]]:
"""
Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors,
a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary.
@ -41,7 +41,7 @@ def load_ggml(path: Union[str, PathLike]):
pass
@staticmethod
def load_gguf(path: Union[str, PathLike]):
def load_gguf(path: Union[str, PathLike]) -> Tuple[Dict[str, QTensor], Dict[str, Any]]:
"""
Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors,
and the second maps metadata keys to metadata values.
@ -49,14 +49,21 @@ def load_gguf(path: Union[str, PathLike]):
pass
@staticmethod
def load_safetensors(path: Union[str, PathLike]):
def load_safetensors(path: Union[str, PathLike]) -> Dict[str, Tensor]:
"""
Loads a safetensors file. Returns a dictionary mapping tensor names to tensors.
"""
pass
@staticmethod
def save_safetensors(path: Union[str, PathLike], tensors: Dict[str, Tensor]):
def save_gguf(path: Union[str, PathLike], tensors: Dict[str, QTensor], metadata: Dict[str, Any]) -> None:
"""
Saves quantized tensors and metadata to a GGUF file.
"""
pass
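A sketch of a GGUF load/save round trip using the stubs above; `"model.gguf"` is a placeholder path, and importing these names from `candle.utils` assumes the re-exports shown earlier in this commit.

```python
from candle.utils import load_gguf, save_gguf

# Placeholder path; assumes the file exists and contains at least one tensor.
tensors, metadata = load_gguf("model.gguf")

print(list(metadata)[:5])                         # a few metadata keys
print(next(iter(tensors.values())).ggml_dtype)    # quantized dtype of one tensor

# Write the same quantized tensors and metadata back out.
save_gguf("model-copy.gguf", tensors, metadata)
```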
@staticmethod
def save_safetensors(path: Union[str, PathLike], tensors: Dict[str, Tensor]) -> None:
"""
Saves a dictionary of tensors to a safetensors file.
"""