Coverage for geometric_kernels/spaces/eigenfunctions.py: 90%
90 statements
coverage.py v7.11.3, created at 2025-11-16 21:43 +0000
1"""
2This module provides base classes for storing or evaluating eigenfunctions of
3the Laplacian, or certain combinations thereof (see the note below).
5.. note::
6 Sometimes, when relations analogous to the :doc:`addition theorem
7 </theory/addition_theorem>` on the sphere are available, it is much more
8 efficient to use certain sums of outer products of eigenfunctions instead
9 of the eigenfunctions themselves. For this, we offer
10 :class:`EigenfunctionsWithAdditionTheorem`. Importantly, it is permitted to
11 _only_ provide the computational routines for these "certain sums", lacking
12 the actual capability to compute the eigenfunctions themselves. This is
13 important because for compact Lie groups, for example, computing
14 eigenfunctions is more involved and less efficient than computing the
15 "certain sums", called characters in this case. This is also relevant for
16 hyperspheres of higher dimension: in this case, the eigenfunctions
17 (spherical harmonics) are much more cumbersome than the "certain
18 sums" (zonal spherical harmonics).
19"""
import abc

import lab as B
from beartype.typing import List, Optional

from geometric_kernels.lab_extras import complex_like, is_complex, take_along_axis
class Eigenfunctions(abc.ABC):
    r"""
    Abstract base class providing an interface for working with eigenfunctions.

    We denote the basis of eigenfunctions represented by an instance of
    this class by $[f_j]_{j=0}^{J-1}$. We assume it to be partitioned into
    *levels* (see :class:`~.kernels.MaternKarhunenLoeveKernel` on how they are
    used). Specifically, we call the sets $[f_{l s}]_{s=1}^{d_l}$ *levels* if

    .. math:: [f_j]_{j=0}^{J-1} = \bigcup_{l=0}^{L-1} [f_{l s}]_{s=1}^{d_l}

    and all the eigenfunctions $f_{l s}$ with the same index $l$ correspond
    to the same eigenvalue $\lambda_l$. Note, however, that the $\lambda_l$
    are not required to be unique: it is possible that for some $l, l'$,
    $\lambda_l = \lambda_{l'}$.

    .. note::
        There is often more than one way to choose a partition into levels.
        Trivially, you can always assign a separate level to each individual
        eigenfunction. Alternatively, you can partition $[f_j]_{j=0}^{J-1}$
        into the maximal subsets corresponding to the same eigenvalue (into
        the *full eigenspaces*). There are also often plenty of possibilities
        in between these two extremes.

    Importantly, subclasses of this class do not necessarily have to allow
    computing the individual eigenfunctions (i.e. implement the method
    :meth:`__call__`). The only methods that subclasses *have to* implement
    are :meth:`phi_product` and its close relative :meth:`phi_product_diag`.
    These output the sums of outer products of eigenfunctions for all levels:

    .. math:: \sum_{s=1}^{d_l} f_{l s}(x_1) f_{l s}(x_2)

    for all $0 \leq l < L$ and all pairs $x_1$, $x_2$ provided as inputs.
    """
    def weighted_outerproduct(
        self,
        weights: B.Numeric,
        X: B.Numeric,
        X2: Optional[B.Numeric] = None,  # type: ignore
        **kwargs,
    ) -> B.Numeric:
        r"""
        Computes

        .. math:: \sum_{l=0}^{L-1} w_l \sum_{s=1}^{d_l} f_{l s}(x_1) f_{l s}(x_2)

        for all $x_1$ in `X` and all $x_2$ in `X2`, where $w_l$ are `weights`.

        :param weights:
            An array of shape [L, 1], where L is the number of levels.
        :param X:
            The first of the two batches of points to evaluate the weighted
            outer product at. An array of shape [N, <axis>], where N is the
            number of points and <axis> is the shape of the arrays that
            represent the points in a given space.
        :param X2:
            The second of the two batches of points to evaluate the weighted
            outer product at. An array of shape [N2, <axis>], where N2 is the
            number of points and <axis> is the shape of the arrays that
            represent the points in a given space.

            Defaults to None, in which case X is used for X2.
        :param ``**kwargs``:
            Any additional parameters.

        :return:
            An array of shape [N, N2].
        """
        if X2 is None:
            X2 = X

        sum_phi_phi_for_level = self.phi_product(X, X2, **kwargs)  # [N, N2, L]

        if is_complex(sum_phi_phi_for_level):
            sum_phi_phi_for_level = B.cast(complex_like(weights), sum_phi_phi_for_level)
            weights = B.cast(complex_like(weights), weights)
        else:
            sum_phi_phi_for_level = B.cast(B.dtype(weights), sum_phi_phi_for_level)

        return B.einsum("id,...nki->...nk", weights, sum_phi_phi_for_level)  # [N, N2]
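    # Shape sketch (illustrative, not from the library): the einsum above
    # contracts the level axis of `phi_product` with `weights`. Under a NumPy
    # backend and for a hypothetical concrete instance `eigfs`, it is
    # equivalent to
    #
    #     K = eigfs.phi_product(X, X2)   # [N, N2, L]
    #     Kxx = (K @ weights)[..., 0]    # [N, N2]
    #
    # i.e. each level's sum of outer products is scaled by its weight w_l and
    # the results are summed over the levels.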
    def weighted_outerproduct_diag(
        self, weights: B.Numeric, X: B.Numeric, **kwargs
    ) -> B.Numeric:
        r"""
        Computes the diagonal of the matrix
        ``weighted_outerproduct(weights, X, X, **kwargs)``.

        :param weights:
            As in :meth:`weighted_outerproduct`.
        :param X:
            As in :meth:`weighted_outerproduct`.
        :param ``**kwargs``:
            As in :meth:`weighted_outerproduct`.

        :return:
            An array of shape [N,].
        """
        phi_product_diag = self.phi_product_diag(X, **kwargs)  # [N, L]

        if is_complex(phi_product_diag):
            phi_product_diag = B.cast(complex_like(weights), phi_product_diag)
            weights = B.cast(complex_like(weights), weights)
        else:
            phi_product_diag = B.cast(B.dtype(weights), phi_product_diag)

        return B.einsum("id,ni->n", weights, phi_product_diag)  # [N,]
    @abc.abstractmethod
    def phi_product(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        r"""
        Computes the sums

        .. math:: \sum_{s=1}^{d_l} f_{l s}(x_1) f_{l s}(x_2)

        for all $x_1$ in `X`, all $x_2$ in `X2`, and all $0 \leq l < L$.

        :param X:
            The first of the two batches of points to evaluate the phi
            product at. An array of shape [N, <axis>], where N is the
            number of points and <axis> is the shape of the arrays that
            represent the points in a given space.
        :param X2:
            The second of the two batches of points to evaluate the phi
            product at. An array of shape [N2, <axis>], where N2 is the
            number of points and <axis> is the shape of the arrays that
            represent the points in a given space.

            Defaults to None, in which case X is used for X2.
        :param ``**kwargs``:
            Any additional parameters.

        :return:
            An array of shape [N, N2, L].
        """
        raise NotImplementedError
    @abc.abstractmethod
    def phi_product_diag(self, X: B.Numeric, **kwargs):
        r"""
        Computes the diagonals of the matrices
        ``phi_product(X, X, **kwargs)[:, :, l]``, $0 \leq l < L$.

        :param X:
            As in :meth:`phi_product`.
        :param ``**kwargs``:
            As in :meth:`phi_product`.

        :return:
            An array of shape [N, L].
        """
        raise NotImplementedError
    def __call__(self, X: B.Numeric, **kwargs) -> B.Numeric:
        """
        Evaluate the individual eigenfunctions at a batch of input locations.

        :param X:
            Points to evaluate the eigenfunctions at, an array of
            shape [N, <axis>], where N is the number of points and <axis> is
            the shape of the arrays that represent the points in a given space.
        :param ``**kwargs``:
            Any additional parameters.

        :return:
            An [N, J]-shaped array, where `J` is the number of eigenfunctions.
        """
        raise NotImplementedError
    @property
    def num_eigenfunctions(self) -> int:
        """
        The number J of eigenfunctions.
        """
        return sum(self.num_eigenfunctions_per_level)

    @property
    def num_levels(self) -> int:
        """
        The number L of levels.
        """
        return len(self.num_eigenfunctions_per_level)

    @abc.abstractproperty
    def num_eigenfunctions_per_level(self) -> List[int]:
        r"""
        The number of eigenfunctions per level: list of $d_l$, $0 \leq l < L$.
        """
        raise NotImplementedError
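# A minimal sketch (not part of the library) of a concrete subclass: the
# eigenfunctions are supplied as plain callables, grouped by level, and the
# per-level sums of outer products are assembled generically. All names here
# are hypothetical, and NumPy inputs and outputs are assumed.
import numpy as np  # assumed to be available alongside `lab`


class _ExplicitEigenfunctionsSketch(Eigenfunctions):
    def __init__(self, functions: List[List]):
        # `functions[l]` is the list [f_{l 1}, ..., f_{l d_l}] of callables,
        # each mapping an [N, <axis>] array of points to an [N,] array.
        self._functions = functions

    def __call__(self, X: B.Numeric, **kwargs) -> B.Numeric:
        flat = [f for level in self._functions for f in level]
        return np.stack([f(X) for f in flat], axis=-1)  # [N, J]

    def phi_product(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        if X2 is None:
            X2 = X
        # For each level, sum the outer products f_{l s}(X) f_{l s}(X2)^T.
        return np.stack(
            [
                sum(np.outer(f(X), f(X2)) for f in level)  # [N, N2]
                for level in self._functions
            ],
            axis=-1,
        )  # [N, N2, L]

    def phi_product_diag(self, X: B.Numeric, **kwargs) -> B.Numeric:
        return np.stack(
            [sum(f(X) ** 2 for f in level) for level in self._functions],
            axis=-1,
        )  # [N, L]

    @property
    def num_eigenfunctions_per_level(self) -> List[int]:
        return [len(level) for level in self._functions]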
class EigenfunctionsWithAdditionTheorem(Eigenfunctions):
    r"""
    Eigenfunctions for which the sum of outer products over a level has a
    simpler expression.

    .. note::
        See a brief introduction to the notion of addition theorems in the
        :doc:`respective documentation page </theory/addition_theorem>`.
    """

    def phi_product(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        return self._addition_theorem(X, X2, **kwargs)

    def phi_product_diag(self, X: B.Numeric, **kwargs) -> B.Numeric:
        return self._addition_theorem_diag(X, **kwargs)
    @abc.abstractmethod
    def _addition_theorem(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        """
        Basically, an implementation of :meth:`phi_product`.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _addition_theorem_diag(self, X: B.Numeric, **kwargs) -> B.Numeric:
        """
        Basically, an implementation of :meth:`phi_product_diag`.
        """
        raise NotImplementedError
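# A minimal sketch (not part of the library) of an addition-theorem subclass
# for the circle: level 0 is the constant eigenfunction, and level l >= 1 is
# spanned by {cos(l x), sin(l x)}, whose sum of outer products collapses to
# cos(l (x1 - x2)) via the identity
# cos(l x1) cos(l x2) + sin(l x1) sin(l x2) = cos(l (x1 - x2)).
# Normalization constants are ignored, all names are hypothetical, and NumPy
# (`np`, as imported in the sketch above) is assumed. Note that `__call__`
# is deliberately left unimplemented, which the base class permits.
class _CircleEigenfunctionsSketch(EigenfunctionsWithAdditionTheorem):
    def __init__(self, num_levels: int):
        self._num_levels = num_levels

    def _addition_theorem(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        if X2 is None:
            X2 = X
        diff = X[:, None, 0] - X2[None, :, 0]  # [N, N2]; X, X2 are [N, 1] angles
        levels = np.arange(self._num_levels)  # [L,]
        return np.cos(levels[None, None, :] * diff[:, :, None])  # [N, N2, L]

    def _addition_theorem_diag(self, X: B.Numeric, **kwargs) -> B.Numeric:
        # On the diagonal, cos(l * 0) = 1 for every level l.
        return np.ones((np.shape(X)[0], self._num_levels))  # [N, L]

    @property
    def num_eigenfunctions_per_level(self) -> List[int]:
        return [1] + [2] * (self._num_levels - 1)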
class EigenfunctionsFromEigenvectors(Eigenfunctions):
    """
    Turns an array of eigenvectors into an :class:`Eigenfunctions` instance.
    The inputs of the resulting eigenfunctions are indices: the $j$-th
    eigenfunction maps an index to the corresponding entry of the $j$-th
    eigenvector.

    Each individual eigenfunction corresponds to a separate level.

    :param eigenvectors:
        Array of shape [D, J] containing J eigenvectors of dimension D.
    :param index_from_one:
        If True, the indices are assumed to be 1-based. If False, they are
        assumed to be 0-based. Defaults to False.
    """

    def __init__(self, eigenvectors: B.Numeric, index_from_one: bool = False):
        self.eigenvectors = eigenvectors
        self.index_from_one = index_from_one
    def weighted_outerproduct(
        self,
        weights: B.Numeric,
        X: B.Numeric,
        X2: Optional[B.Numeric] = None,  # type: ignore
        **kwargs,
    ) -> B.Numeric:
        Phi_X = self.__call__(X, **kwargs)  # [N, J]
        if X2 is None:
            Phi_X2 = Phi_X
        else:
            Phi_X2 = self.__call__(X2, **kwargs)  # [N2, J]

        Phi_X = B.cast(B.dtype(weights), Phi_X)
        Phi_X2 = B.cast(B.dtype(weights), Phi_X2)

        Kxx = B.matmul(B.transpose(weights) * Phi_X, Phi_X2, tr_b=True)  # [N, N2]
        return Kxx
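    # Note (illustrative, not from the library): since `weights` has shape
    # [J, 1], the matmul above is equivalent, under a NumPy backend, to
    # Phi_X @ np.diag(weights[:, 0]) @ Phi_X2.T, i.e. each eigenvector column
    # is scaled by its weight before the outer product is taken.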
    def weighted_outerproduct_diag(
        self, weights: B.Numeric, X: B.Numeric, **kwargs
    ) -> B.Numeric:
        Phi_X = self.__call__(X, **kwargs)  # [N, J]
        Kx = B.sum(B.transpose(weights) * Phi_X**2, axis=1)  # [N,]
        return Kx
    def phi_product(
        self, X: B.Numeric, X2: Optional[B.Numeric] = None, **kwargs
    ) -> B.Numeric:
        if X2 is None:
            X2 = X
        Phi_X = self.__call__(X, **kwargs)  # [N, J]
        Phi_X2 = self.__call__(X2, **kwargs)  # [N2, J]
        return B.einsum("nl,ml->nml", Phi_X, Phi_X2)  # [N, N2, J]

    def phi_product_diag(self, X: B.Numeric, **kwargs):
        Phi_X = self.__call__(X, **kwargs)  # [N, J]
        return Phi_X**2
    def __call__(self, X: B.Numeric, **kwargs) -> B.Numeric:
        """
        Selects, for each index in `X`, the corresponding entries of the `J`
        stored eigenvectors `self.eigenvectors`.

        :param X:
            Indices, an array of shape [N, 1].
        :param ``**kwargs``:
            Ignored.

        :return:
            An array of shape [N, J], whose element with index (n, j) is
            the X[n]-th element of the j-th eigenvector.
        """
        indices = B.cast(B.dtype_int(X), X)
        if self.index_from_one:
            # assert B.all(indices >= 1) removed because of tensorflow's
            # OperatorNotAllowedInGraphError
            Phi = take_along_axis(self.eigenvectors, indices - 1, axis=0)
        else:
            # assert B.all(indices >= 0) removed because of tensorflow's
            # OperatorNotAllowedInGraphError
            Phi = take_along_axis(self.eigenvectors, indices, axis=0)
        return Phi
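    # Worked example (illustrative): with eigenvectors [[1, 2], [3, 4], [5, 6]]
    # (D = 3, J = 2), index_from_one=False and X = [[2], [0]], __call__ returns
    # [[5, 6], [1, 2]]: row n holds the X[n]-th entry of each eigenvector.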
    @property
    def num_eigenfunctions(self) -> int:
        return B.shape(self.eigenvectors)[-1]

    @property
    def num_levels(self) -> int:
        """
        Number of levels.

        Returns J, same as :meth:`num_eigenfunctions`.
        """
        return self.num_eigenfunctions

    @property
    def num_eigenfunctions_per_level(self) -> List[int]:
        """
        Number of eigenfunctions per level.

        Returns a list of J ones.
        """
        return [1] * self.num_levels
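# Usage sketch (illustrative, not part of the library): build eigenfunctions
# from a small [D, J] array of eigenvectors and evaluate a weighted outer
# product at integer index inputs. The chosen numbers are arbitrary.
if __name__ == "__main__":
    import numpy as np

    eigenvectors = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, -1.0]])  # [D, J]
    eigenfunctions = EigenfunctionsFromEigenvectors(eigenvectors)

    X = np.array([[0], [2]])  # indices, shape [N, 1]
    weights = np.array([[0.75], [0.25]])  # one weight per level, shape [J, 1]

    Kxx = eigenfunctions.weighted_outerproduct(weights, X)  # [N, N]
    print(Kxx)  # entry (n, m) equals sum_j w_j * Phi[X[n], j] * Phi[X[m], j]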