Mirror of https://github.com/huggingface/candle.git, synced 2025-06-16 02:38:10 +00:00

* Add support for loading Fortran contiguous tensors

  This commit introduces the ability to handle Fortran contiguous tensors in the tensor loading process. Previously, the code only supported tensors that were C contiguous in memory and failed with an error for anything else. Tensors identified as Fortran contiguous (column-major order) are now handled correctly by reversing their dimensions after loading, extending tensor loading to a wider variety of layouts.

  - Check whether a tensor is Fortran contiguous using the `is_fortran_contiguous` flag.
  - For Fortran contiguous tensors, reverse the dimensions after loading to correctly represent their layout in memory.
  - Continue to bail out with an error for tensors that are neither C contiguous nor Fortran contiguous, preserving the previous behavior for unsupported layouts.

* Add reshape step to handle fortran contiguous case

* Skip fortran contiguous fix if rank is < 2

* Fail on rank 0, 1 if contiguous
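The dimension-reversal trick the commit describes can be illustrated outside of candle: reading the raw buffer of a column-major array as if it were row-major with reversed dimensions, then transposing, recovers the original values. A minimal NumPy sketch of the idea (an illustration only, not the actual Rust implementation):

import numpy as np

# A 2x3x4 array stored in Fortran (column-major) order.
original = np.asfortranarray(np.arange(1, 25).reshape(2, 3, 4))

# Pretend we are a loader that only understands C (row-major) buffers:
# read the raw bytes back with the dimensions reversed...
raw = original.tobytes(order="A")  # dump memory as-is, no reordering
loaded = np.frombuffer(raw, dtype=original.dtype).reshape(original.shape[::-1])

# ...then transpose (reverse all axes) to recover the logical layout.
recovered = loaded.T
assert np.array_equal(recovered, original)
print("Fortran buffer recovered via reversed dims + transpose.")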
38 lines
1.4 KiB
Python
import torch
import numpy as np

from collections import OrderedDict


# Create a trivial tensor and store it under a key
a = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
o = OrderedDict()
o["test"] = a

# Write the trivial tensor to a pt file
torch.save(o, "test.pt")

############################################################################################################
# Write the trivial tensor to a pt file under a top-level key
torch.save({"model_state_dict": o}, "test_with_key.pt")

############################################################################################################
# Create a tensor with a Fortran contiguous memory layout

# Step 1: Create a 3D NumPy array with Fortran order using a range of numbers
# For example, creating a 2x3x4 array
array_fortran = np.asfortranarray(np.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4))

# Verify the memory order
print("Is Fortran contiguous (F order):", array_fortran.flags["F_CONTIGUOUS"])  # Should be True
print("Is C contiguous (C order):", array_fortran.flags["C_CONTIGUOUS"])  # Should be False

# Step 2: Convert the NumPy array to a PyTorch tensor (shares memory, keeps the layout)
tensor_fortran = torch.from_numpy(array_fortran)

# Verify the tensor layout
print("Tensor stride:", tensor_fortran.stride())  # (1, 2, 6): strides grow with the axis, i.e. Fortran order

# Step 3: Save the PyTorch tensor to a .pth file
torch.save({"tensor_fortran": tensor_fortran}, "fortran_tensor_3d.pth")

print("3D Tensor saved with Fortran layout.")
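############################################################################################################
# Round-trip check (illustrative addition, not part of the original script): reload the
# files written above and verify the tensors survive save/load unchanged. Assumes the
# saves above succeeded in the current working directory.

reloaded = torch.load("test.pt")
assert torch.equal(reloaded["test"], a)

with_key = torch.load("test_with_key.pt")
assert torch.equal(with_key["model_state_dict"]["test"], a)

saved = torch.load("fortran_tensor_3d.pth")
assert torch.equal(saved["tensor_fortran"], tensor_fortran)

print("Round-trip checks passed.")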