diff --git a/docs/source/conf.py b/docs/source/conf.py
index 8a8538d9..1300c9de 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -92,7 +92,7 @@
     'github_url': 'https://github.com/PTB-MR/mrpro/main',
 }
 linkcode_blob = html_context['github_version']
-
+default_role = 'any'
 def get_lambda_source(obj):
     """Convert lambda to source code."""
@@ -244,11 +244,25 @@ def sync_notebooks(source_folder, dest_folder):
         dest_file = dest / src_file.name
         if not dest_file.exists() or src_file.stat().st_mtime > dest_file.stat().st_mtime:
             shutil.copy2(src_file, dest_file)
+import nbformat
+import re
+def replace_patterns_in_markdown(app, docname, source):
+    """Replace patterns like `module.class` with {any}`module.class` in Markdown cells."""
+    if '_notebooks' not in docname:
+        return
+    notebook = nbformat.reads(source[0], as_version=4)
+    for cell in notebook.cells:
+        if cell["cell_type"] == "markdown":
+            # Replace `text` with {any}`text`; leave ``text`` as is.
+            cell["source"] = re.sub(r"(?<!`)`([^`]+)`(?!`)", r"{any}`\1`", cell["source"])
+    source[0] = nbformat.writes(notebook)
diff --git a/examples/... b/examples/...
@@ ... @@
+def show_images(*images: torch.Tensor, titles: list[str] | None = None) -> None:
+    """Plot images."""
+    n_images = len(images)
+    _, axes = plt.subplots(1, n_images, squeeze=False, figsize=(n_images * 3, 3))
+    for i in range(n_images):
+        axes[0][i].imshow(images[i], cmap='gray')
+        axes[0][i].axis('off')
+        if titles:
+            axes[0][i].set_title(titles[i])
+    plt.show()
+
+
+# %%
+# Combine data from different coils and show magnitude image
+magnitude_fully_sampled = img.abs().square().sum(dim=-4).sqrt().squeeze()
+show_images(magnitude_fully_sampled)
 # %% [markdown]
 # Great! That was very easy! Let's try to reconstruct the next dataset.
 # %% [markdown]
 # ## Reconstruction of acquisition with partial echo and partial Fourier
-# %% inputHidden=true outputHidden=true tags=["remove-output"]
+# %% tags=["remove-output"]
 # Read in the data
 kdata_pe_pf = KData.from_file(data_folder / 'cart_t1_partial_echo_partial_fourier.mrd', KTrajectoryCartesian())
@@ -140,36 +152,36 @@
 (img_pe_pf,) = fft_op.adjoint(kdata_pe_pf.data)
 # Combine data from different coils using root-sum-of-squares
-img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze()
+magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()
+
 # Plot both images
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(img_pe_pf)
-plt.show()
+show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])
 # %% [markdown]
 # Well, we got an image, but when we compare it to the previous result, it seems like the head has shrunk.
 # Since that's extremely unlikely, there's probably a mistake in our reconstruction.
 #
 # Let's step back and check out the trajectories for both scans.
-
 # %%
 print(kdata.traj)
 # %% [markdown]
-# We see that the trajectory has kz, ky, and kx components. Kx and ky only vary along one dimension.
-# This is because MRpro saves the trajectory in the most efficient way.
-# To get the full trajectory as a tensor, we can just call as_tensor().
+# We see that the trajectory has ``kz``, ``ky``, and ``kx`` components. ``kx`` and ``ky`` only vary along one dimension.
+# This is because MRpro saves metadata such as trajectories in an efficient way, where dimensions in which the data
+# does not change are often collapsed. The original shape can be obtained by
+# [broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html).
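The collapsed-dimensions idea can be sketched with plain PyTorch broadcasting. The shapes below are invented purely for illustration and are not taken from the datasets used in this example:

```python
import torch

# Invented shapes: ky only varies along one dimension, kx along another; all other
# dimensions are stored as singletons.
ky = torch.arange(-96, 96, dtype=torch.float32).reshape(1, 1, 192, 1)
kx = torch.arange(-128, 128, dtype=torch.float32).reshape(1, 1, 1, 256)

# Broadcasting expands the collapsed singleton dimensions to the full grid
# without storing redundant copies up front.
full_ky, full_kx = torch.broadcast_tensors(ky, kx)
print(ky.shape, kx.shape)            # [1, 1, 192, 1] and [1, 1, 1, 256]
print(full_ky.shape, full_kx.shape)  # both [1, 1, 192, 256]
```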
+# Here, to get the full trajectory as a tensor, we can also just call `~mrpro.data.KTrajectory.as_tensor()`:
 # %%
 # Plot the fully sampled trajectory (in blue)
-plt.plot(kdata.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata.traj.as_tensor()[1, 0, 0, :, :].flatten(), 'ob')
+full_kz, full_ky, full_kx = kdata.traj.as_tensor()
+plt.plot(full_ky[0, 0].flatten(), full_kx[0, 0].flatten(), 'ob')
 # Plot the partial echo and partial Fourier trajectory (in red)
-plt.plot(
-    kdata_pe_pf.traj.as_tensor()[2, 0, 0, :, :].flatten(), kdata_pe_pf.traj.as_tensor()[1, 0, 0, :, :].flatten(), '+r'
-)
+full_kz, full_ky, full_kx = kdata_pe_pf.traj.as_tensor()
+plt.plot(full_ky[0, 0].flatten(), full_kx[0, 0].flatten(), '+r')
+
 plt.show()
 # %% [markdown]
 # We see that for the fully sampled acquisition, the k-space is covered symmetrically from -256 to 255 along the
@@ -180,7 +192,7 @@
 # between encoding and recon matrix needs to be zero-padded symmetrically.
 #
 # To take the asymmetric acquisition into account and sort the data correctly into a matrix where we can apply the
-# FFT-operator to, we have got the {py:class}`~mrpro.operators.CartesianSamplingOp` in MRpro. This operator performs
+# FFT-operator to, we have got the `~mrpro.operators.CartesianSamplingOp` in MRpro. This operator performs
 # sorting based on the k-space trajectory and the dimensions of the encoding k-space.
 #
 # Let's try it out!
@@ -191,27 +203,40 @@
 cart_sampling_op = CartesianSamplingOp(encoding_matrix=kdata_pe_pf.header.encoding_matrix, traj=kdata_pe_pf.traj)
 # %% [markdown]
-# Now, we first apply the CartesianSamplingOp and then call the FFT-operator.
+# Now, we first apply the adjoint of the `~mrpro.operators.CartesianSamplingOp` and then the adjoint of the FFT-operator.
 # %%
 (img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])
-img_pe_pf = torch.sqrt(torch.sum(img_pe_pf**2, dim=-4)).abs().squeeze()
+magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(img_pe_pf)
+show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])
-# %% [markdown]
 # %% [markdown]
 # Voila! We've got the same brains, and they're the same size!
-#
-# But wait a second—something still looks a bit off. In the bottom left corner, it seems like there's a "hole"
+
+# %% [markdown]
+# In MRpro, we have a smart `~mrpro.operators.FourierOp` operator that automatically does the resorting and can
+# handle non-Cartesian data as well. For Cartesian data, it internally does exactly the two steps we just did manually.
+# The operator can also be created from an existing `~mrpro.data.KData` object via `~mrpro.operators.FourierOp.from_kdata`.
+# This is the recommended way to transform k-space data.

+# %%
+from mrpro.operators import FourierOp
+
+fourier_op = FourierOp.from_kdata(kdata_pe_pf)
+(img_pe_pf,) = fourier_op.adjoint(kdata_pe_pf.data)
+magnitude_pe_pf = img_pe_pf.abs().square().sum(dim=-4).sqrt().squeeze()
+show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])
+
+# %% [markdown]
+# That was easy!
+# But wait a second — something still looks a bit off. In the bottom left corner, it seems like there's a "hole"
 # in the brain. That definitely shouldn't be there.
 #
 # The issue is that we combined the data from the different coils using a root-sum-of-squares approach.
 # While it's simple, it's not the ideal method. Typically, coil sensitivity maps are calculated to combine the data
 # from different coils.
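The difference between the two combination strategies can be shown in a self-contained sketch with made-up sensitivities and a made-up image. This is not MRpro code, and the normalization in the last step is only one common choice, not necessarily what a particular sensitivity operator does internally:

```python
import torch

torch.manual_seed(0)
csm = torch.randn(4, 8, 8, dtype=torch.complex64)  # made-up coil sensitivity maps (4 coils)
image = torch.randn(8, 8, dtype=torch.complex64)   # made-up "true" image
coil_images = csm * image                          # per-coil images, as after the adjoint FFT

# Root-sum-of-squares: simple, but the result keeps the spatially varying coil weighting.
rss = coil_images.abs().square().sum(dim=0).sqrt()

# Matched-filter combination with the sensitivity maps: conj(csm)-weighted sum over coils.
combined = (csm.conj() * coil_images).sum(dim=0) / csm.abs().square().sum(dim=0)

print(torch.allclose(combined, image, atol=1e-5))   # True: the toy image is recovered
print(torch.allclose(rss, image.abs(), atol=1e-5))  # False in general
```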
 # In MRpro, you can do this by calculating coil sensitivity data and then creating a
-# {py:class}`~mrpro.operators.SensitivityOp` to combine the data after image reconstruction.
+# `~mrpro.operators.SensitivityOp` to combine the data after image reconstruction.
 # %% [markdown]
@@ -223,21 +248,19 @@
 from mrpro.operators import SensitivityOp
 # Calculate coil sensitivity maps
 (img_pe_pf,) = fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])
 # This algorithms is designed to calculate coil sensitivity maps for each other dimension.
 csm_data = walsh(img_pe_pf[0, ...], smoothing_width=5)[None, ...]
 # Create SensitivityOp
 csm_op = SensitivityOp(csm_data)
 # Reconstruct coil-combined image
-(img_pe_pf,) = csm_op.adjoint(fft_op.adjoint(cart_sampling_op.adjoint(kdata_pe_pf.data)[0])[0])
-img_pe_pf = img_pe_pf.abs().squeeze()
+(img_pe_pf,) = csm_op.adjoint(*fourier_op.adjoint(kdata_pe_pf.data))
+magnitude_pe_pf = img_pe_pf.abs().squeeze()
+show_images(magnitude_fully_sampled, magnitude_pe_pf, titles=['fully sampled', 'PF & PE'])
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(img_pe_pf.squeeze())
 # %% [markdown]
 # Tada! The "hole" is gone, and the image looks much better.
@@ -249,25 +272,22 @@
 # %%
 # Create composite operator
-acq_op = cart_sampling_op @ fft_op @ csm_op
-(img_pe_pf,) = acq_op.adjoint(kdata_pe_pf.data)
-img_pe_pf = img_pe_pf.abs().squeeze()
-
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(img_pe_pf)
+adjoint_operator = (fourier_op @ csm_op).H
+(img_pe_pf,) = adjoint_operator(kdata_pe_pf.data)
+magnitude_pe_pf = img_pe_pf.abs().squeeze()
+show_images(magnitude_pe_pf, titles=['PF & PE'])
 # %% [markdown]
 # Although we now have got a nice looking image, it was still a bit cumbersome to create it. We had to define several
 # different operators and chain them together. Wouldn't it be nice if this could be done automatically?
 #
 # That is why we also included some top-level reconstruction algorithms in MRpro. For this whole steps from above,
-# we can simply call a `{py:class}`~mrpro.algorithnms.reconstruction.DirectReconstruction`.
-# Reconstruction algorithms can be instantiated from only the information in the `KData` object.
+# we can simply use a `~mrpro.algorithms.reconstruction.DirectReconstruction`.
+# Reconstruction algorithms can be instantiated from only the information in the `~mrpro.data.KData` object.
 #
 # In contrast to operators, top-level reconstruction algorithms operate on the data objects of MRpro, i.e. the input is
-# a `{py:class}`~mrpro.data.KData` object and the output is an `{py:class}`~mrpro.data.IData` object containing
-# the reconstructed image data. To get its magnitude as tensor, we can call the {py:meth}`~mrpro.data.IData.rss` method.
+# a `~mrpro.data.KData` object and the output is an `~mrpro.data.IData` object containing
+# the reconstructed image data. To get its magnitude, we can call the `~mrpro.data.IData.rss` method.
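The composite-operator step above relies on the identity (F ∘ S)ᴴ = Sᴴ ∘ Fᴴ. A quick numerical check with random matrices standing in for the operators (pure linear algebra, not MRpro code):

```python
import torch

torch.manual_seed(0)
fourier = torch.randn(6, 4, dtype=torch.complex64)  # stand-in for the Fourier operator
sens = torch.randn(4, 3, dtype=torch.complex64)     # stand-in for the sensitivity operator
kdata = torch.randn(6, dtype=torch.complex64)       # stand-in for measured k-space data

chained = (fourier @ sens).mH @ kdata          # adjoint of the chained operator
step_by_step = sens.mH @ (fourier.mH @ kdata)  # individual adjoints applied in reverse order
print(torch.allclose(chained, step_by_step, atol=1e-5))  # True
```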
 # %%
 from mrpro.algorithms.reconstruction import DirectReconstruction
@@ -278,13 +298,10 @@
 # Reconstruct image by calling the DirectReconstruction object
 idat_pe_pf = direct_recon_pe_pf(kdata_pe_pf)
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(idat_pe_pf.rss().squeeze())
 # %% [markdown]
 # This is much simpler — everything happens in the background, so we don't have to worry about it.
-# Let's try it on the undersampled dataset now.
+# Let's finally try it on the undersampled dataset now.
 # %% [markdown]
@@ -295,86 +312,12 @@
 direct_recon_us = DirectReconstruction(kdata_us)
 idat_us = direct_recon_us(kdata_us)
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(idat_us.rss().squeeze())
+show_images(idat_pe_pf.rss().squeeze(), idat_us.rss().squeeze(), titles=['PE & PF', 'Undersampled'])
 # %% [markdown]
 # As expected, we can see undersampling artifacts in the image. In order to get rid of them, we can use an iterative
 # SENSE algorithm. As you might have guessed, this is also included in MRpro.
-# Similarly to the `DirectReconstruction`, we can create an `IterativeSENSEReconstruction` and apply it to the
-# undersampled data.
-#
-# One important thing to keep in mind is that this only works if the coil maps that we use do not have any
-# undersampling artifacts. Commonly, we would get them from a fully sampled self-calibration reference lines in the
-# center of k-space or a separate coil sensitivity scan.
-#
-# As a first step, we are going to assume that we have got a nice fully sampled reference scan like our partial echo and
-# partial Fourier acquisition. We can get the `CsmData`, which is needed for the `IterativeSENSEReconstruction`, from
-# the previous reconstruction.
-
-# %%
-from mrpro.algorithms.reconstruction import IterativeSENSEReconstruction
-
-it_sense_recon = IterativeSENSEReconstruction(kdata=kdata_us, csm=direct_recon_pe_pf.csm)
-idat_us = it_sense_recon(kdata_us)
-
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(idat_us.rss().squeeze())
-
-# %% [markdown]
-# That worked well, but in practice, we don't want to acquire a fully sampled version of our scan just to
-# reconstruct it. A more efficient approach is to get a few self-calibration lines in the center of k-space
-# to create a low-resolution, fully sampled image.
-#
-# In our scan, these lines are part of the dataset, but they aren't used for image reconstruction since
-# they're only meant for calibration (i.e., coil sensitivity map calculation). Because they're not labeled
-# for imaging, MRpro ignores them by default when reading the data. However, we can set a flag when calling
-# `from_file` to read in just those lines for reconstructing the coil sensitivity maps.
-
-# %%
-from mrpro.data.acq_filters import is_coil_calibration_acquisition
-
-kdata_calib_lines = KData.from_file(
-    data_folder / 'cart_t1_msense_integrated.mrd',
-    KTrajectoryCartesian(),
-    acquisition_filter_criterion=lambda acq: is_coil_calibration_acquisition(acq),
-)
-
-direct_recon_calib_lines = DirectReconstruction(kdata_calib_lines)
-im_calib_lines = direct_recon_calib_lines(kdata_calib_lines)
-
-plt.imshow(im_calib_lines.rss().squeeze())
-
-# %% [markdown]
-# Although this only yields a low-resolution image, it is good enough to calculate coil sensitivity maps.
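The fold-over artifacts mentioned above can be reproduced in one dimension with nothing but an FFT. The signal below is made up purely for illustration and has nothing to do with the example datasets:

```python
import torch

x = torch.zeros(64, dtype=torch.complex64)
x[20:28] = 1.0                      # a small "object" in image space
k = torch.fft.fft(x)                # fully sampled k-space

k_undersampled = torch.zeros_like(k)
k_undersampled[::2] = k[::2]        # keep only every second k-space sample (2x undersampling)
x_undersampled = torch.fft.ifft(k_undersampled)

# The result contains the object plus a copy shifted by half the field of view,
# i.e. the classic aliasing caused by regular undersampling.
print(x_undersampled.abs().round(decimals=2))
```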
-
-# %%
-# Visualize coil sensitivity maps of all 16 coils
-assert direct_recon_calib_lines.csm is not None # needed for type checking
-fig, ax = plt.subplots(4, 4, squeeze=False)
-for idx, cax in enumerate(ax.flatten()):
-    cax.imshow(direct_recon_calib_lines.csm.data[0, idx, 0, ...].abs())
-
-# %% [markdown]
-# Now, we can use these coil sensitivity maps to reconstruct our SENSE scan.
-
-# %%
-it_sense_recon = IterativeSENSEReconstruction(kdata_us, csm=direct_recon_calib_lines.csm)
-idat_us = it_sense_recon(kdata_us)
-
-fig, ax = plt.subplots(1, 2, squeeze=False)
-ax[0, 0].imshow(img_fully_sampled)
-ax[0, 1].imshow(idat_us.rss().squeeze())
-
-# %% [markdown]
-# %% [markdown]
-# The final image is a little worse (nothing beats fully sampled high-resolution scans for coil map
-# calculation), but we've managed to get rid of the undersampling artifacts inside the brain. If you want to
-# further improve the coil sensitivity map quality, try:
-# - using different methods to calculate them, e.g. `mrpro.algorithms.csm.inati`
-# - playing around with the parameters of these methods
-# - applying a smoothing filter on the images (or ideally directly in k-space) used to calculate the coil
-# sensitivity maps
+# Similarly to the `~mrpro.algorithms.reconstruction.DirectReconstruction`,
+# we can use an `~mrpro.algorithms.reconstruction.IterativeSENSEReconstruction`.
+# For more information, see the iterative SENSE reconstruction example.
diff --git a/src/mrpro/operators/FourierOp.py b/src/mrpro/operators/FourierOp.py
index 84f7ba02..5b75265d 100644
--- a/src/mrpro/operators/FourierOp.py
+++ b/src/mrpro/operators/FourierOp.py
@@ -27,7 +27,7 @@ class FourierOp(LinearOperator, adjoint_as_backward=True):
     It also includes padding/cropping to the reconstruction matrix size.
 
     The operator can directly be constructed from a :py:class:`KData` object to match its
-    trajectory and header information, see :py:func:`FouruerOp.from_kdata`
+    trajectory and header information, see :py:func:`FourierOp.from_kdata`
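Conceptually, the iterative SENSE reconstruction referenced above solves the normal equations AᴴA x = Aᴴy of the acquisition model with a conjugate-gradient loop. A minimal sketch on a random toy problem, where a matrix A stands in for the combination of coil sensitivities and Fourier transform (this is not MRpro's implementation):

```python
import torch

torch.manual_seed(0)
a = torch.randn(40, 10, dtype=torch.complex64)  # stand-in acquisition operator
x_true = torch.randn(10, dtype=torch.complex64)
y = a @ x_true                                  # "measured" data


def cg_normal_equations(a: torch.Tensor, y: torch.Tensor, n_iterations: int = 50) -> torch.Tensor:
    """Plain conjugate gradient on the normal equations A^H A x = A^H y."""
    x = torch.zeros(a.shape[1], dtype=a.dtype)
    r = a.mH @ y - a.mH @ (a @ x)   # residual of the normal equations
    p = r.clone()
    rs_old = (r.conj() @ r).real
    for _ in range(n_iterations):
        ap = a.mH @ (a @ p)
        alpha = rs_old / (p.conj() @ ap).real
        x = x + alpha * p
        r = r - alpha * ap
        rs_new = (r.conj() @ r).real
        if rs_new < 1e-12:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x


x_est = cg_normal_equations(a, y)
print(torch.allclose(x_est, x_true, atol=1e-3))  # True for this well-conditioned toy problem
```

In practice the acquisition operator is never materialized as a matrix; the solver only needs forward and adjoint applications, which is exactly what the chained linear operators above provide.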