Commit

Merge pull request #928 from LaurentMazare/pytorch-2.6
Update for pytorch 2.6.
LaurentMazare authored Jan 30, 2025
2 parents 5ee0fcc + a40ffc4 commit 10b8823
Showing 13 changed files with 400 additions and 24 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
### Changed

+## v0.19.0
+### Changed
+- PyTorch v2.6 support
+
## v0.18.1
### Changed
- PyTorch v2.5.1 support
4 changes: 2 additions & 2 deletions Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "tch"
version = "0.18.1"
version = "0.19.0"
authors = ["Laurent Mazare <[email protected]>"]
edition = "2021"
build = "build.rs"
@@ -22,7 +22,7 @@ libc = "0.2.0"
ndarray = "0.16.1"
rand = "0.8"
thiserror = "1"
-torch-sys = { version = "0.18.1", path = "torch-sys" }
+torch-sys = { version = "0.19.0", path = "torch-sys" }
zip = "0.6"
half = "2"
safetensors = "0.3.0"
4 changes: 2 additions & 2 deletions README.md
@@ -18,7 +18,7 @@ The code generation part for the C api on top of libtorch comes from

## Getting Started

-This crate requires the C++ PyTorch library (libtorch) in version *v2.5.1* to be available on
+This crate requires the C++ PyTorch library (libtorch) in version *v2.6.0* to be available on
your system. You can either:

- Use the system-wide libtorch installation (default).
@@ -85,7 +85,7 @@ seem to include `libtorch.a` by default so this would have to be compiled
manually, e.g. via the following:

```bash
-git clone -b v2.5.1 --recurse-submodule https://github.com/pytorch/pytorch.git pytorch-static --depth 1
+git clone -b v2.6.0 --recurse-submodule https://github.com/pytorch/pytorch.git pytorch-static --depth 1
cd pytorch-static
USE_CUDA=OFF BUILD_SHARED_LIBS=OFF python setup.py build
# export LIBTORCH to point at the build directory in pytorch-static.
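Once `LIBTORCH` points at a v2.6.0 build (or the system-wide install is used), a quick way to confirm the crate links correctly is a minimal program — a sketch using tch's public tensor API:

```rust
// Linkage smoke test: constructing and printing a tensor fails fast at
// build or load time if the libtorch version does not match tch 0.19.
use tch::{Device, Kind, Tensor};

fn main() {
    let t = Tensor::randn(&[2, 3], (Kind::Float, Device::Cpu));
    t.print();
}
```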
6 changes: 3 additions & 3 deletions examples/python-extension/Cargo.toml
@@ -18,6 +18,6 @@ crate-type = ["cdylib"]

[dependencies]
pyo3 = { version = "0.21", features = ["extension-module"] }
-pyo3-tch = { path = "../../pyo3-tch", version = "0.18.1" }
-tch = { path = "../..", features = ["python-extension"], version = "0.18.1" }
-torch-sys = { path = "../../torch-sys", features = ["python-extension"], version = "0.18.1" }
+pyo3-tch = { path = "../../pyo3-tch", version = "0.19.0" }
+tch = { path = "../..", features = ["python-extension"], version = "0.19.0" }
+torch-sys = { path = "../../torch-sys", features = ["python-extension"], version = "0.19.0" }
5 changes: 3 additions & 2 deletions gen/gen.ml
@@ -691,7 +691,8 @@ let write_fallible_wrapper funcs filename =
List.iter func.args ~f:(fun arg ->
match arg.arg_type with
| DoubleOption | Int64Option ->
pm " let %s = %s.into();" arg.arg_name arg.arg_name
let arg_name = Func.rust_name arg.arg_name in
pm " let %s = %s.into();" arg_name arg_name
| _ -> ());
match func.returns with
| `dynamic ->
@@ -882,7 +883,7 @@ let run

let () =
run
-~yaml_filename:"third_party/pytorch/Declarations-v2.5.0.yaml"
+~yaml_filename:"third_party/pytorch/Declarations-v2.6.0.yaml"
~cpp_filename:"torch-sys/libtch/torch_api_generated"
~ffi_filename:"torch-sys/src/c_generated.rs"
~wrapper_filename:"src/wrappers/tensor_generated.rs"
6 changes: 3 additions & 3 deletions pyo3-tch/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "pyo3-tch"
version = "0.18.1"
version = "0.19.0"
authors = ["Laurent Mazare <[email protected]>"]
edition = "2021"
build = "build.rs"
@@ -12,6 +12,6 @@ categories = ["science"]
license = "MIT/Apache-2.0"

[dependencies]
tch = { path = "..", features = ["python-extension"], version = "0.18.1" }
torch-sys = { path = "../torch-sys", features = ["python-extension"], version = "0.18.1" }
tch = { path = "..", features = ["python-extension"], version = "0.19.0" }
torch-sys = { path = "../torch-sys", features = ["python-extension"], version = "0.19.0" }
pyo3 = { version = "0.21", features = ["extension-module"] }
164 changes: 162 additions & 2 deletions src/wrappers/tensor_fallible_generated.rs
@@ -635,14 +635,18 @@ impl Tensor {
size: impl IntListOption,
stride: impl IntListOption,
dtype: impl Into<Option<Kind>>,
+device: Device,
+layout: Option<Layout>,
) -> Result<(), TchError> {
unsafe_torch_err!(atg__assert_tensor_metadata(
a.c_tensor,
size.as_ptr(),
size.len_i32(),
stride.as_ptr(),
stride.len_i32(),
-dtype.into().map_or(-1, |s| s.c_int())
+dtype.into().map_or(-1, |s| s.c_int()),
+device.c_int(),
+layout.map_or(-1, |s| s.to_i8())
));
Ok(())
}
@@ -1271,6 +1275,19 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_internal_convert_weight_to_int4pack_for_cpu(
+    &self,
+    innerktiles: i64,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg__convert_weight_to_int4pack_for_cpu(
+        c_tensors.as_mut_ptr(),
+        self.c_tensor,
+        innerktiles
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

pub fn f_internal_convolution<T: Borrow<Tensor>>(
&self,
weight: &Tensor,
@@ -1485,6 +1502,8 @@
out_dtype: impl Into<Option<Kind>>,
transpose_result: bool,
alg_id: i64,
+split_k: i64,
+split_k_one_kernel: bool,
) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__cslt_sparse_mm(
@@ -1495,7 +1514,9 @@
alpha.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
out_dtype.into().map_or(-1, |s| s.c_int()),
if transpose_result { 1 } else { 0 },
-alg_id
+alg_id,
+split_k,
+if split_k_one_kernel { 1 } else { 0 }
));
Ok(Tensor { c_tensor: c_tensors[0] })
}
@@ -4805,6 +4826,31 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_internal_nested_from_padded_tensor<T: Borrow<Tensor>>(
+    padded: &Tensor,
+    offsets: &Tensor,
+    dummy: &Tensor,
+    ragged_idx: i64,
+    min_seqlen: Option<T>,
+    max_seqlen: Option<T>,
+    sum_s: impl Into<Option<i64>>,
+) -> Result<Tensor, TchError> {
+    let sum_s = sum_s.into();
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg__nested_from_padded_tensor(
+        c_tensors.as_mut_ptr(),
+        padded.c_tensor,
+        offsets.c_tensor,
+        dummy.c_tensor,
+        ragged_idx,
+        min_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
+        max_seqlen.as_ref().map_or(std::ptr::null_mut(), |t| t.borrow().c_tensor),
+        sum_s.unwrap_or(0i64),
+        sum_s.is_none() as i8
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

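The `sum_s` handling in the hunk above illustrates the convention these generated wrappers use for optional integers crossing the C boundary: the value and a was-none flag travel as two separate arguments. The same idea in isolation, as a minimal sketch:

```rust
// How the generated bindings lower Option<i64> for the C ABI: a default
// value plus an explicit is-none flag, since C has no Option type.
fn option_i64_to_c(v: Option<i64>) -> (i64, i8) {
    (v.unwrap_or(0), v.is_none() as i8)
}
```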
pub fn f_internal_nested_get_jagged_dummy(any: &Tensor) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg__nested_get_jagged_dummy(c_tensors.as_mut_ptr(), any.c_tensor));
@@ -8879,6 +8925,23 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_internal_weight_int4pack_mm_for_cpu(
+    &self,
+    mat2: &Tensor,
+    qgroupsize: i64,
+    qscaleandzeros: &Tensor,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg__weight_int4pack_mm_for_cpu(
+        c_tensors.as_mut_ptr(),
+        self.c_tensor,
+        mat2.c_tensor,
+        qgroupsize,
+        qscaleandzeros.c_tensor
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

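Together with `f_internal_convert_weight_to_int4pack_for_cpu` earlier in this file, this adds a CPU path for int4 weight-quantized matmul. A hedged usage sketch — the tile count and quantization group size below are illustrative assumptions, not values prescribed by PyTorch:

```rust
// Sketch: pack an int4-quantized weight for the CPU layout, then multiply.
use tch::{TchError, Tensor};

fn int4_mm_cpu(
    input: &Tensor,
    weight: &Tensor,
    scales_and_zeros: &Tensor,
) -> Result<Tensor, TchError> {
    // innerktiles = 8 is an assumed, illustrative value.
    let packed = weight.f_internal_convert_weight_to_int4pack_for_cpu(8)?;
    // A quantization group size of 32 is likewise assumed for illustration.
    input.f_internal_weight_int4pack_mm_for_cpu(&packed, 32, scales_and_zeros)
}
```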
pub fn f_internal_weight_int8pack_mm(
&self,
mat2: &Tensor,
@@ -9118,6 +9181,22 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_adaptive_avg_pool1d_out(
+    &self,
+    out: &Tensor,
+    output_size: impl IntList,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg_adaptive_avg_pool1d_out(
+        c_tensors.as_mut_ptr(),
+        out.c_tensor,
+        self.c_tensor,
+        output_size.as_ptr(),
+        output_size.len_i32()
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

pub fn f_adaptive_avg_pool2d(&self, output_size: impl IntList) -> Result<Tensor, TchError> {
let mut c_tensors = [std::ptr::null_mut(); 1];
unsafe_torch_err!(atg_adaptive_avg_pool2d(
@@ -10627,6 +10706,32 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_avg_pool1d_out(
+    &self,
+    out: &Tensor,
+    kernel_size: impl IntList,
+    stride: impl IntList,
+    padding: impl IntList,
+    ceil_mode: bool,
+    count_include_pad: bool,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg_avg_pool1d_out(
+        c_tensors.as_mut_ptr(),
+        out.c_tensor,
+        self.c_tensor,
+        kernel_size.as_ptr(),
+        kernel_size.len_i32(),
+        stride.as_ptr(),
+        stride.len_i32(),
+        padding.as_ptr(),
+        padding.len_i32(),
+        if ceil_mode { 1 } else { 0 },
+        if count_include_pad { 1 } else { 0 }
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

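Like `f_adaptive_avg_pool1d_out` above, this exposes the `out=` overload of the 1-d pooling op, writing into a caller-provided tensor instead of allocating. A sketch with illustrative shapes:

```rust
// Sketch: average-pool into a preallocated output to avoid a per-call
// allocation; shapes and pooling parameters here are illustrative.
use tch::{Device, Kind, Tensor};

fn main() {
    let x = Tensor::randn(&[1, 4, 16], (Kind::Float, Device::Cpu));
    let out = Tensor::zeros(&[1, 4, 8], (Kind::Float, Device::Cpu));
    // kernel 2, stride 2, padding 0, no ceil mode, count padding in the mean.
    let _y = x.f_avg_pool1d_out(&out, &[2], &[2], &[0], false, true).unwrap();
}
```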
pub fn f_avg_pool2d(
&self,
kernel_size: impl IntList,
@@ -30513,6 +30618,21 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_rrelu_with_noise_functional(
+    &self,
+    noise: &Tensor,
+    training: bool,
+) -> Result<(Tensor, Tensor), TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 2];
+    unsafe_torch_err!(atg_rrelu_with_noise_functional(
+        c_tensors.as_mut_ptr(),
+        self.c_tensor,
+        noise.c_tensor,
+        if training { 1 } else { 0 }
+    ));
+    Ok((Tensor { c_tensor: c_tensors[0] }, Tensor { c_tensor: c_tensors[1] }))
+}

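PyTorch 2.6 adds this functional variant of rrelu-with-noise, which returns the refreshed noise tensor alongside the activation output rather than updating `noise` in place. A sketch:

```rust
// Sketch: the functional variant yields (output, new_noise) as a pair.
use tch::{Device, Kind, Tensor};

fn main() {
    let x = Tensor::randn(&[2, 8], (Kind::Float, Device::Cpu));
    let noise = Tensor::zeros(&[2, 8], (Kind::Float, Device::Cpu));
    let (y, new_noise) = x.f_rrelu_with_noise_functional(&noise, true).unwrap();
    println!("{:?} {:?}", y.size(), new_noise.size());
}
```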
pub fn f_rrelu_with_noise_out(
&self,
out: &Tensor,
@@ -37091,6 +37211,27 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_upsample_bilinear2d_vec_out(
+    &self,
+    out: &Tensor,
+    output_size: impl IntListOption,
+    align_corners: bool,
+    scale_factors: impl DoubleList,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg_upsample_bilinear2d_vec_out(
+        c_tensors.as_mut_ptr(),
+        out.c_tensor,
+        self.c_tensor,
+        output_size.as_ptr(),
+        output_size.len_i32(),
+        if align_corners { 1 } else { 0 },
+        scale_factors.as_ptr(),
+        scale_factors.len_i32()
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

pub fn f_upsample_linear1d(
&self,
output_size: impl IntList,
@@ -37414,6 +37555,25 @@ impl Tensor {
Ok(Tensor { c_tensor: c_tensors[0] })
}

+pub fn f_upsample_nearest2d_vec_out(
+    &self,
+    out: &Tensor,
+    output_size: impl IntListOption,
+    scale_factors: impl DoubleList,
+) -> Result<Tensor, TchError> {
+    let mut c_tensors = [std::ptr::null_mut(); 1];
+    unsafe_torch_err!(atg_upsample_nearest2d_vec_out(
+        c_tensors.as_mut_ptr(),
+        out.c_tensor,
+        self.c_tensor,
+        output_size.as_ptr(),
+        output_size.len_i32(),
+        scale_factors.as_ptr(),
+        scale_factors.len_i32()
+    ));
+    Ok(Tensor { c_tensor: c_tensors[0] })
+}

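The `_vec` upsample overloads take either an explicit output size or per-dimension scale factors. A sketch using scale factors with the size argument left unset — the `None` type annotation is an assumption about which types implement `IntListOption` here:

```rust
// Sketch: nearest-neighbour 2x upsample into a preallocated tensor via the
// new vec_out binding; the None::<&[i64]> annotation is an assumption.
use tch::{Device, Kind, Tensor};

fn main() {
    let x = Tensor::randn(&[1, 3, 8, 8], (Kind::Float, Device::Cpu));
    let out = Tensor::zeros(&[1, 3, 16, 16], (Kind::Float, Device::Cpu));
    let _y = x
        .f_upsample_nearest2d_vec_out(&out, None::<&[i64]>, &[2.0, 2.0])
        .unwrap();
}
```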
pub fn f_upsample_nearest3d(
&self,
output_size: impl IntList,