Coverage for geometric_kernels/utils/kernel_formulas/hypercube_graph.py: 89%
18 statements
« prev ^ index » next coverage.py v7.11.3, created at 2025-11-16 21:43 +0000
1"""
2Implements the closed form expression for the heat kernel on the hypercube graph.
4The implementation is provided mainly for testing purposes.
5"""
7from math import sqrt
9import lab as B
10from beartype.typing import Optional
12from geometric_kernels.lab_extras import float_like
13from geometric_kernels.utils.utils import (
14 _check_1_vector,
15 _check_matrix,
16 hamming_distance,
17)
def hypercube_graph_heat_kernel(
    lengthscale: B.Numeric,
    X: B.Numeric,
    X2: Optional[B.Numeric] = None,
    normalized_laplacian: bool = True,
):
    """
    Analytic formula for the heat kernel on the hypercube graph, see
    Equation (14) in :cite:t:`borovitskiy2023`.

    :param lengthscale:
        The length scale of the kernel, an array of shape [1].
    :param X:
        A batch of inputs, an array of shape [N, d].
    :param X2:
        A batch of inputs, an array of shape [N2, d]. If None, defaults to X.
    :param normalized_laplacian:
        If True, treat the kernel as derived from the normalized graph
        Laplacian, which amounts to rescaling the length scale by
        1/sqrt(d) (the hypercube graph is d-regular). Defaults to True.

    :return:
        The kernel matrix, an array of shape [N, N2].

    :raises ValueError:
        If `X` and `X2` have mismatching last dimensions (after the shape
        checks of the `_check_*` helpers pass).
    """
    if X2 is None:
        X2 = X

    _check_1_vector(lengthscale, "lengthscale")
    _check_matrix(X, "X")
    _check_matrix(X2, "X2")

    if X.shape[-1] != X2.shape[-1]:
        raise ValueError("`X` and `X2` must live in a same-dimensional space.")

    if normalized_laplacian:
        # The normalized Laplacian of a d-regular graph is the unnormalized
        # one divided by d; absorbing that factor into the length scale
        # (which enters the formula squared) means dividing it by sqrt(d).
        d = X.shape[-1]
        lengthscale = lengthscale / sqrt(d)

    # For TensorFlow, we need to explicitly cast the distances to double.
    # Note: if we use B.dtype_float(X) instead of float_like(X), it gives
    # float16 and TensorFlow is still complaining.
    hamming_distances = B.cast(float_like(X), hamming_distance(X, X2))

    return B.tanh(lengthscale**2 / 2) ** hamming_distances