Bug#1093354: pytorch-geometric: FTBFS: Error: Python 3.13+ not yet supported for torch.compile
Santiago Vila
sanvila at debian.org
Fri Jan 17 18:54:05 GMT 2025
Package: src:pytorch-geometric
Version: 2.6.1-1
Severity: serious
Tags: ftbfs trixie sid
Dear maintainer:
During a rebuild of all packages in unstable, your package failed to build:
--------------------------------------------------------------------------------
[...]
debian/rules clean
dh clean --buildsystem pybuild
dh_auto_clean -O--buildsystem=pybuild
dh_autoreconf_clean -O--buildsystem=pybuild
dh_clean -O--buildsystem=pybuild
debian/rules binary
dh binary --buildsystem pybuild
dh_update_autotools_config -O--buildsystem=pybuild
dh_autoreconf -O--buildsystem=pybuild
dh_auto_configure -O--buildsystem=pybuild
dh_auto_build -O--buildsystem=pybuild
I: pybuild plugin_pyproject:129: Building wheel for python3.13 with "build" module
I: pybuild base:311: python3.13 -m build --skip-dependency-check --no-isolation --wheel --outdir /<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric
* Building wheel...
[... snipped ...]
        model = Model(
            in_channels=8,
            hidden_channels=16,
            num_layers=2,
            **kwargs,
        ).to(device)

>       explanation = dynamo.explain(model)(x, edge_index)

test/nn/models/test_basic_gnn.py:359:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:832: in inner
    opt_f = optimize(
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:716: in optimize
    return _optimize(rebuild_ctx, *args, **kwargs)
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:755: in _optimize
    check_if_dynamo_supported()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def check_if_dynamo_supported():
        if sys.version_info >= (3, 13):
>           raise RuntimeError("Python 3.13+ not yet supported for torch.compile")
E           RuntimeError: Python 3.13+ not yet supported for torch.compile

/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:689: RuntimeError
______________________ test_compile_graph_breaks[GIN-cpu] ______________________

Model = <class 'torch_geometric.nn.models.basic_gnn.GIN'>
device = device(type='cpu')

    @withDevice
    @onlyLinux
    @withPackage('torch>=2.1.0')
    @pytest.mark.parametrize('Model', [GCN, GraphSAGE, GIN, GAT, EdgeCNN, PNA])
    def test_compile_graph_breaks(Model, device):
        import torch._dynamo as dynamo

        x = torch.randn(3, 8, device=device)
        edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], device=device)

        kwargs = {}
        if Model in {GCN, GAT}:
            # Adding self-loops inside the model leads to graph breaks :(
            kwargs['add_self_loops'] = False

        if Model in {PNA}:  # `PNA` requires additional arguments:
            kwargs['aggregators'] = ['sum', 'mean', 'min', 'max', 'var', 'std']
            kwargs['scalers'] = ['identity', 'amplification', 'attenuation']
            kwargs['deg'] = torch.tensor([1, 2, 1])

        model = Model(
            in_channels=8,
            hidden_channels=16,
            num_layers=2,
            **kwargs,
        ).to(device)

>       explanation = dynamo.explain(model)(x, edge_index)

test/nn/models/test_basic_gnn.py:359:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:832: in inner
    opt_f = optimize(
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:716: in optimize
    return _optimize(rebuild_ctx, *args, **kwargs)
/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:755: in _optimize
    check_if_dynamo_supported()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def check_if_dynamo_supported():
        if sys.version_info >= (3, 13):
>           raise RuntimeError("Python 3.13+ not yet supported for torch.compile")
E           RuntimeError: Python 3.13+ not yet supported for torch.compile

/usr/lib/python3/dist-packages/torch/_dynamo/eval_frame.py:689: RuntimeError
[... identical tracebacks for test_compile_graph_breaks[GAT-cpu],
[EdgeCNN-cpu] and [PNA-cpu] snipped: each fails in
check_if_dynamo_supported() with the same RuntimeError ...]
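
[Note: the failing test already guards on the torch version via the
`withPackage` decorator. An analogous interpreter-version guard -- sketched
below with a hypothetical `withPython` helper that is *not* part of
pytorch-geometric -- would keep these tests from running on 3.13:

    # Hypothetical sketch, in the spirit of `withPackage` above:
    import sys
    import pytest

    def withPython(*, below):
        # Skip the decorated test unless the interpreter is older
        # than the given (major, minor) tuple.
        return pytest.mark.skipif(
            sys.version_info >= below,
            reason=f"requires Python < {'.'.join(map(str, below))}",
        )

    @withPython(below=(3, 13))
    def test_compile_smoke():
        import torch
        torch.compile(torch.nn.Linear(1, 1))  # only reached on < 3.13
]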
_________________________________ test_compile _________________________________

    @onlyLinux
    @withPackage('torch>=2.0.0')
    def test_compile():
>       model = torch.compile(torch.nn.Linear(1, 1))

test/test_isinstance.py:14:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

model = Linear(in_features=1, out_features=1, bias=True)

    def compile(
        model: _Optional[_Callable] = None,
        *,
        fullgraph: builtins.bool = False,
        dynamic: _Optional[builtins.bool] = None,
        backend: _Union[str, _Callable] = "inductor",
        mode: _Union[str, None] = None,
        options: _Optional[_Dict[str, _Union[str, builtins.int, builtins.bool]]] = None,
        disable: builtins.bool = False,
    ) -> _Union[
        _Callable[[_Callable[_InputT, _RetT]], _Callable[_InputT, _RetT]],
        _Callable[_InputT, _RetT],
    ]:
"""
Optimizes given model/function using TorchDynamo and specified backend.
If you are compiling an :class:`torch.nn.Module`, you can also use :meth:`torch.nn.Module.compile`
to compile the module inplace without changing its structure.
Concretely, for every frame executed within the compiled region, we will attempt
to compile it and cache the compiled result on the code object for future
use. A single frame may be compiled multiple times if previous compiled
results are not applicable for subsequent calls (this is called a "guard
failure), you can use TORCH_LOGS=guards to debug these situations.
Multiple compiled results can be associated with a frame up to
``torch._dynamo.config.cache_size_limit``, which defaults to 8; at which
point we will fall back to eager. Note that compile caches are per
*code object*, not frame; if you dynamically create multiple copies of a
function, they will all share the same code cache.
Args:
model (Callable): Module/function to optimize
fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions
in the function that it will optimize. If True, then we require that the entire function be
capturable into a single graph. If this is not possible (that is, if there are graph breaks),
then this will raise an error.
dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt
to generate a kernel that is as dynamic as possible to avoid recompilations when
sizes change. This may not always work as some operations/optimizations will
force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
When this is False, we will NEVER generate dynamic kernels, we will always specialize.
By default (None), we automatically detect if dynamism has occurred and compile a more
dynamic kernel upon recompile.
backend (str or Callable): backend to be used
- "inductor" is the default backend, which is a good balance between performance and overhead
- Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
- Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
- To register an out-of-tree custom backend:
https://pytorch.org/docs/main/torch.compiler_custom_backends.html#registering-custom-backends
mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
- "default" is the default mode, which is a good balance between performance and overhead
- "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
useful for small batches. Reduction of overhead can come at the cost of more memory
usage, as we will cache the workspace memory required for the invocation so that we
do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed
to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints
to debug.
- "max-autotune" is a mode that leverages Triton or template based matrix multiplications
on supported devices and Triton based convolutions on GPU.
It enables CUDA graphs by default on GPU.
- "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
- To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
- `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
- `max_autotune` which will profile to pick the best matmul configuration
- `fallback_random` which is useful when debugging accuracy issues
- `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
- `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
- `trace.enabled` which is the most useful debugging flag to turn on
- `trace.graph_diagram` which will show you a picture of your graph after fusion
- For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
disable (bool): Turn torch.compile() into a no-op for testing
Example::
@torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
def foo(x):
return torch.sin(x) + torch.cos(x)
"""
        _C._log_api_usage_once("torch.compile")
        if sys.version_info >= (3, 13):
>           raise RuntimeError("Dynamo is not supported on Python 3.13+")
E           RuntimeError: Dynamo is not supported on Python 3.13+

/usr/lib/python3/dist-packages/torch/__init__.py:2416: RuntimeError
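
[Note: the long docstring above is incidental; the relevant part is the
unconditional version check at the end of torch.compile(). A call site can
only avoid the error by not calling torch.compile() at all on 3.13+ --
judging from the traceback, even `disable=True` would not help, since the
version check appears to run first. A sketch, assuming eager execution is
an acceptable fallback:

    import sys
    import torch

    model = torch.nn.Linear(1, 1)
    if sys.version_info < (3, 13):
        model = torch.compile(model)
    # On Python 3.13+ the model simply stays an eager nn.Module.
]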
=============================== warnings summary ===============================
torch_geometric/inspector.py:433: 60 warnings
test/contrib/nn/models/test_rbcd_attack.py: 792 warnings
test/explain/algorithm/test_attention_explainer.py: 483 warnings
test/explain/algorithm/test_captum.py: 46 warnings
test/explain/algorithm/test_explain_algorithm_utils.py: 106 warnings
test/explain/algorithm/test_gnn_explainer.py: 22530 warnings
test/explain/algorithm/test_graphmask_explainer.py: 18144 warnings
test/explain/algorithm/test_pg_explainer.py: 414 warnings
test/loader/test_neighbor_loader.py: 146 warnings
test/nn/conv/test_agnn_conv.py: 30 warnings
test/nn/conv/test_antisymmetric_conv.py: 11 warnings
test/nn/conv/test_arma_conv.py: 24 warnings
test/nn/conv/test_cg_conv.py: 68 warnings
test/nn/conv/test_cheb_conv.py: 20 warnings
test/nn/conv/test_cluster_gcn_conv.py: 13 warnings
test/nn/conv/test_create_gnn.py: 10 warnings
test/nn/conv/test_dir_gnn_conv.py: 20 warnings
test/nn/conv/test_dna_conv.py: 57 warnings
test/nn/conv/test_edge_conv.py: 22 warnings
test/nn/conv/test_eg_conv.py: 72 warnings
test/nn/conv/test_fa_conv.py: 15 warnings
test/nn/conv/test_feast_conv.py: 11 warnings
test/nn/conv/test_film_conv.py: 47 warnings
test/nn/conv/test_gat_conv.py: 144 warnings
test/nn/conv/test_gated_graph_conv.py: 13 warnings
test/nn/conv/test_gatv2_conv.py: 96 warnings
test/nn/conv/test_gcn2_conv.py: 13 warnings
test/nn/conv/test_gcn_conv.py: 66 warnings
test/nn/conv/test_gen_conv.py: 92 warnings
test/nn/conv/test_general_conv.py: 210 warnings
test/nn/conv/test_gin_conv.py: 63 warnings
test/nn/conv/test_gmm_conv.py: 82 warnings
test/nn/conv/test_gps_conv.py: 60 warnings
test/nn/conv/test_graph_conv.py: 38 warnings
test/nn/conv/test_gravnet_conv.py: 12 warnings
test/nn/conv/test_han_conv.py: 58 warnings
test/nn/conv/test_heat_conv.py: 33 warnings
test/nn/conv/test_hetero_conv.py: 352 warnings
test/nn/conv/test_hgt_conv.py: 105 warnings
test/nn/conv/test_hypergraph_conv.py: 44 warnings
test/nn/conv/test_le_conv.py: 14 warnings
test/nn/conv/test_lg_conv.py: 13 warnings
test/nn/conv/test_message_passing.py: 462 warnings
test/nn/conv/test_mf_conv.py: 21 warnings
test/nn/conv/test_mixhop_conv.py: 13 warnings
test/nn/conv/test_nn_conv.py: 22 warnings
test/nn/conv/test_pan_conv.py: 11 warnings
test/nn/conv/test_pdn_conv.py: 24 warnings
test/nn/conv/test_pna_conv.py: 24 warnings
test/nn/conv/test_point_conv.py: 13 warnings
test/nn/conv/test_point_gnn_conv.py: 14 warnings
test/nn/conv/test_point_transformer_conv.py: 51 warnings
test/nn/conv/test_ppf_conv.py: 16 warnings
test/nn/conv/test_res_gated_graph_conv.py: 52 warnings
test/nn/conv/test_rgat_conv.py: 3858 warnings
test/nn/conv/test_rgcn_conv.py: 211 warnings
test/nn/conv/test_sage_conv.py: 170 warnings
test/nn/conv/test_sg_conv.py: 13 warnings
test/nn/conv/test_signed_conv.py: 21 warnings
test/nn/conv/test_simple_conv.py: 46 warnings
test/nn/conv/test_ssg_conv.py: 13 warnings
test/nn/conv/test_static_graph.py: 30 warnings
test/nn/conv/test_supergat_conv.py: 25 warnings
test/nn/conv/test_tag_conv.py: 24 warnings
test/nn/conv/test_transformer_conv.py: 120 warnings
test/nn/conv/test_wl_conv_continuous.py: 13 warnings
test/nn/dense/test_dense_gat_conv.py: 64 warnings
test/nn/dense/test_dense_gcn_conv.py: 11 warnings
test/nn/dense/test_dense_gin_conv.py: 10 warnings
test/nn/dense/test_dense_graph_conv.py: 72 warnings
test/nn/dense/test_dense_sage_conv.py: 10 warnings
test/nn/models/test_attentive_fp.py: 48 warnings
test/nn/models/test_basic_gnn.py: 104798 warnings
test/nn/models/test_correct_and_smooth.py: 46 warnings
test/nn/models/test_deep_graph_infomax.py: 22 warnings
test/nn/models/test_deepgcn.py: 80 warnings
test/nn/models/test_label_prop.py: 11 warnings
test/nn/models/test_lightgcn.py: 792 warnings
test/nn/models/test_linkx.py: 24 warnings
test/nn/models/test_neural_fingerprint.py: 80 warnings
test/nn/models/test_pmlp.py: 11 warnings
test/nn/models/test_rect.py: 11 warnings
test/nn/models/test_rev_gnn.py: 208 warnings
test/nn/models/test_schnet.py: 100 warnings
test/nn/models/test_signed_gcn.py: 20 warnings
test/nn/models/test_visnet.py: 260 warnings
test/nn/pool/test_pan_pool.py: 11 warnings
test/nn/pool/test_sag_pool.py: 147 warnings
test/nn/test_sequential.py: 178 warnings
test/nn/test_to_hetero_module.py: 10 warnings
test/nn/test_to_hetero_transformer.py: 318 warnings
test/nn/test_to_hetero_with_bases_transformer.py: 140 warnings
test/profile/test_profiler.py: 20 warnings
test/test_inspector.py: 14 warnings
test/utils/test_embedding.py: 22 warnings
test/utils/test_subgraph.py: 22 warnings
test/visualization/test_influence.py: 22 warnings
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric/build/torch_geometric/inspector.py:433: DeprecationWarning: Failing to pass a value to the 'type_params' parameter of 'typing._eval_type' is deprecated, as it leads to incorrect behaviour when calling typing._eval_type on a stringified annotation that references a PEP 695 type parameter. It will be disallowed in Python 3.15.
return typing._eval_type(value, _globals, None) # type: ignore
torch_geometric/graphgym/config.py:19
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric/build/torch_geometric/graphgym/config.py:19: UserWarning: Could not define global config object. Please install 'yacs' via 'pip install yacs' in order to use GraphGym
warnings.warn("Could not define global config object. Please install "
torch_geometric/graphgym/imports.py:14
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric/build/torch_geometric/graphgym/imports.py:14: UserWarning: Please install 'pytorch_lightning' via 'pip install pytorch_lightning' in order to use GraphGym
warnings.warn("Please install 'pytorch_lightning' via "
test/loader/test_dataloader.py: 17 warnings
/usr/lib/python3.13/multiprocessing/popen_fork.py:67: DeprecationWarning: This process (pid=170812) is multi-threaded, use of fork() may lead to deadlocks in the child.
self.pid = os.fork()
test/loader/test_imbalanced_sampler.py: 2 warnings
test/loader/test_link_neighbor_loader.py: 22 warnings
test/loader/test_mixin.py: 3 warnings
test/loader/test_neighbor_loader.py: 16 warnings
test/loader/test_zip_loader.py: 2 warnings
test/nn/conv/test_pna_conv.py: 1 warning
/<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric/build/torch_geometric/sampler/neighbor_sampler.py:61: UserWarning: Using 'NeighborSampler' without a 'pyg-lib' installation is deprecated and will be removed soon. Please install 'pyg-lib' for accelerated neighborhood sampling
warnings.warn(f"Using '{self.__class__.__name__}' without a "
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
FAILED test/data/test_feature_store.py::test_feature_store - RuntimeError: Boolean value of Tensor with more than one value is ambiguous
FAILED test/nn/conv/test_hetero_conv.py::test_compile_hetero_conv_graph_breaks[cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/conv/test_sage_conv.py::test_compile_multi_aggr_sage_conv[cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_packaging - AttributeError: module 'typing' has no attribute 'io'. Did you mean: 'IO'?
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[GCN-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[GraphSAGE-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[GIN-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[GAT-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[EdgeCNN-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/nn/models/test_basic_gnn.py::test_compile_graph_breaks[PNA-cpu] - RuntimeError: Python 3.13+ not yet supported for torch.compile
FAILED test/test_isinstance.py::test_compile - RuntimeError: Dynamo is not supported on Python 3.13+
====== 11 failed, 5533 passed, 862 skipped, 53 deselected, 157563 warnings in 89.04s (0:01:29) ======
E: pybuild pybuild:389: test: plugin pyproject failed with: exit code=1: cd /<<PKGBUILDDIR>>/.pybuild/cpython3_3.13_torch-geometric/build; python3.13 -m pytest -k 'not test_citeseer and not test_enzymes and not test_mutag and not test_basic_gnn_inference and not _on_cora and not test_torch_profile and not test_appnp and not test_asap and not test_two_hop and not test_add_random_walk_pe and not test_graph_unet and not test_spspmm and not test_add_metapaths and not test_type_repr'
dh_auto_test: error: pybuild --test --test-pytest -i python{version} -p 3.13 returned exit code 13
make: *** [debian/rules:12: binary] Error 25
dpkg-buildpackage: error: debian/rules binary subprocess returned exit status 2
--------------------------------------------------------------------------------
The above is just how the build ends; it is not necessarily the most relevant part.
If required, the full build log is available here:
https://people.debian.org/~sanvila/build-logs/202501/
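
For reference, the torch.compile failures reduce to a one-liner against
the packaged torch (a sketch; torch 2.x from unstable is assumed, as in
the log above):

    import sys
    import torch

    assert sys.version_info >= (3, 13)
    torch.compile(torch.nn.Linear(1, 1))
    # -> RuntimeError: Dynamo is not supported on Python 3.13+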
About the archive rebuild: The build was made on virtual machines from AWS,
using sbuild and a reduced chroot with only build-essential packages.
If you could not reproduce the bug, please contact me privately, as I
am willing to provide ssh access to a virtual machine where the bug is
fully reproducible.
If this is really a bug in one of the build-depends, please use
reassign and add an affects on src:pytorch-geometric, so that this is still
visible in the BTS web page for this package.
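
Until python3-torch supports Dynamo on Python 3.13, one possible stopgap
would be to extend the existing -k deselection in debian/rules, or to
auto-skip the compile-dependent tests from a conftest.py. A sketch of the
latter (the hook is standard pytest; the substring match on test names is
an assumption about which tests exercise torch.compile):

    import sys
    import pytest

    def pytest_collection_modifyitems(config, items):
        if sys.version_info < (3, 13):
            return
        skip = pytest.mark.skip(reason="Dynamo unsupported on 3.13+")
        for item in items:
            # All torch.compile failures in this log have 'compile' in
            # the test name (test_compile, test_compile_graph_breaks, ...).
            if "compile" in item.name:
                item.add_marker(skip)

Note that test_feature_store and test_packaging fail for different
reasons (typing.io was removed in Python 3.13, for instance) and would
need separate handling.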
Thanks.