
Commit

gram in operator norm
fzimmermann89 committed Nov 14, 2024
1 parent 199e2e8 commit f8963a8
Showing 1 changed file with 7 additions and 7 deletions.
src/mrpro/operators/LinearOperator.py (14 changes: 7 additions & 7 deletions)
@@ -144,7 +144,8 @@ def operator_norm(
         if max_iterations < 1:
             raise ValueError('The number of iterations should be larger than zero.')
 
-        # check that the norm of the starting value is not zero
+        dim = tuple(dim) if dim is not None else dim  # must be tuple or None for torch.sum
+
         norm_initial_value = torch.linalg.vector_norm(initial_value, dim=dim, keepdim=True)
         if not (norm_initial_value > 0).all():
             if dim is None:
@@ -156,19 +157,18 @@ def operator_norm(
                 should be different from the zero-vector.'
             )
 
-        # set initial value
-        vector = initial_value
-
         # create dummy operator norm value that cannot be correct because, by definition, the
         # operator norm is a strictly positive number. This ensures that the first time the
         # change between the old and the new estimate of the operator norm is non-zero and
         # thus prevents the loop from exiting despite an incorrect estimate.
-        op_norm_old = torch.zeros(*tuple([1 for _ in range(vector.ndim)]), device=vector.device)
+        op_norm_old = torch.zeros(*tuple([1 for _ in range(initial_value.ndim)]), device=initial_value.device)
 
-        dim = tuple(dim) if dim is not None else dim
+        gram = self.gram  # self.H@self
+
+        vector = initial_value
         for _ in range(max_iterations):
             # apply the operator to the vector
-            (vector_new,) = self.adjoint(*self(vector))
+            (vector_new,) = gram(vector)
 
             # compute estimate of the operator norm
             product = vector.real * vector_new.real
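Note on the change: the power iteration above estimates the operator norm ||A||_2 as the square root of the largest eigenvalue of the Gram operator A^H A, and this commit obtains that operator once via self.gram (per the inline comment, self.H@self) instead of recomposing self.adjoint(*self(vector)) on every iteration. Below is a minimal self-contained sketch of the same scheme on a dense matrix; it drops mrpro's batching over dim and its tolerance-based early exit, and the function name and setup are illustrative, not mrpro API.

import torch


def spectral_norm_power_iteration(
    matrix: torch.Tensor, initial_value: torch.Tensor, max_iterations: int = 32
) -> torch.Tensor:
    """Estimate ||A||_2 by power iteration on the Gram matrix A^H A (illustrative)."""
    gram = matrix.mH @ matrix  # dense analogue of self.gram, i.e. self.H @ self
    vector = initial_value / torch.linalg.vector_norm(initial_value)
    op_norm_squared = torch.zeros(())  # zero is never a valid (strictly positive) norm
    for _ in range(max_iterations):
        vector_new = gram @ vector
        # The Rayleigh quotient <v, Gv> of the unit vector v approximates the largest
        # eigenvalue of A^H A, i.e. the squared operator norm of A, because v
        # converges to the dominant eigenvector of the Gram matrix.
        op_norm_squared = (vector.conj() * vector_new).sum().real
        vector = vector_new / torch.linalg.vector_norm(vector_new)
    return op_norm_squared.sqrt()


matrix = torch.randn(8, 8, dtype=torch.complex64)
estimate = spectral_norm_power_iteration(matrix, torch.randn(8, dtype=torch.complex64))
exact = torch.linalg.matrix_norm(matrix, ord=2)  # reference value for comparison

Beyond saving one operator composition per loop, going through self.gram presumably also allows subclasses to override gram with a cheaper fused implementation of A^H A than applying A and A^H separately; the sketch above simply forms the Gram matrix densely.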
