From 8f544e85d378e627b70f785cc600448315b3156a Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Mon, 12 Oct 2015 11:11:42 -0500 Subject: [PATCH 01/15] STY: PEP8 formatting --- generate_cartesian_shepp_logan_dataset.py | 95 +++++----- ismrmrdtools/__init__.py | 3 +- ismrmrdtools/coils.py | 35 ++-- ismrmrdtools/grappa.py | 200 +++++++++++++--------- ismrmrdtools/ndarray_io.py | 36 ++-- ismrmrdtools/sense.py | 33 ++-- ismrmrdtools/show.py | 16 +- ismrmrdtools/simulation.py | 155 +++++++++-------- ismrmrdtools/transform.py | 4 +- parallel_imaging_demo.py | 112 ++++++------ recon_ismrmrd_dataset.py | 44 ++--- recon_multi_reps.py | 85 ++++----- setup.py | 18 +- 13 files changed, 463 insertions(+), 373 deletions(-) diff --git a/generate_cartesian_shepp_logan_dataset.py b/generate_cartesian_shepp_logan_dataset.py index 8a3e205..7456f02 100644 --- a/generate_cartesian_shepp_logan_dataset.py +++ b/generate_cartesian_shepp_logan_dataset.py @@ -1,42 +1,45 @@ # coding: utf-8 -import os import ismrmrd import ismrmrd.xsd from ismrmrdtools import simulation, transform import numpy as np import argparse -def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, repetitions=1, acceleration=1, noise_level=0.05): + +def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, + repetitions=1, acceleration=1, noise_level=0.05): # Generate the phantom and coil sensitivity maps phan = simulation.phantom(matrix_size) csm = simulation.generate_birdcage_sensitivities(matrix_size, coils) - coil_images = np.tile(phan,(coils, 1, 1)) * csm + coil_images = np.tile(phan, (coils, 1, 1)) * csm # Oversample if needed - if oversampling>1: + if oversampling > 1: padding = (oversampling*phan.shape[1] - phan.shape[1])/2 - phan = np.pad(phan,((0,0),(padding,padding)),mode='constant') - csm = np.pad(csm,((0,0),(0,0),(padding,padding)),mode='constant') - coil_images = np.pad(coil_images,((0,0),(0,0),(padding,padding)),mode='constant') + phan = np.pad(phan, ((0, 0), (padding, padding)), mode='constant') + csm = np.pad( + csm, ((0, 0), (0, 0), (padding, padding)), mode='constant') + coil_images = np.pad( + coil_images, ((0, 0), (0, 0), (padding, padding)), mode='constant') # The number of points in x,y,kx,ky nx = matrix_size ny = matrix_size nkx = oversampling*nx nky = ny - + # Open the dataset dset = ismrmrd.Dataset(filename, "dataset", create_if_needed=True) - + # Create the XML header and write it to the file header = ismrmrd.xsd.ismrmrdHeader() - + # Experimental Conditions exp = ismrmrd.xsd.experimentalConditionsType() exp.H1resonanceFrequency_Hz = 128000000 header.experimentalConditions = exp - + # Acquisition System Information sys = ismrmrd.xsd.acquisitionSystemInformationType() sys.receiverChannels = coils @@ -45,7 +48,7 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, rep # Encoding encoding = ismrmrd.xsd.encoding() encoding.trajectory = ismrmrd.xsd.trajectoryType.cartesian - + # encoded and recon spaces efov = ismrmrd.xsd.fieldOfView_mm() efov.x = oversampling*256 @@ -55,7 +58,7 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, rep rfov.x = 256 rfov.y = 256 rfov.z = 5 - + ematrix = ismrmrd.xsd.matrixSize() ematrix.x = nkx ematrix.y = nky @@ -64,53 +67,53 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, rep rmatrix.x = nx rmatrix.y = ny rmatrix.z = 1 - + espace = ismrmrd.xsd.encodingSpaceType() espace.matrixSize = ematrix espace.fieldOfView_mm = efov rspace = ismrmrd.xsd.encodingSpaceType() 
rspace.matrixSize = rmatrix rspace.fieldOfView_mm = rfov - + # Set encoded and recon spaces encoding.encodedSpace = espace encoding.reconSpace = rspace - + # Encoding limits limits = ismrmrd.xsd.encodingLimitsType() - + limits1 = ismrmrd.xsd.limitType() limits1.minimum = 0 limits1.center = ny/2 limits1.maximum = ny - 1 limits.kspace_encoding_step_1 = limits1 - + limits_rep = ismrmrd.xsd.limitType() limits_rep.minimum = 0 limits_rep.center = repetitions / 2 limits_rep.maximum = repetitions - 1 limits.repetition = limits_rep - + limits_rest = ismrmrd.xsd.limitType() limits_rest.minimum = 0 limits_rest.center = 0 limits_rest.maximum = 0 limits.kspace_encoding_step_0 = limits_rest - limits.slice = limits_rest + limits.slice = limits_rest limits.average = limits_rest limits.contrast = limits_rest limits.kspaceEncodingStep2 = limits_rest limits.phase = limits_rest limits.segment = limits_rest limits.set = limits_rest - + encoding.encodingLimits = limits header.encoding.append(encoding) - dset.write_xml_header(header.toxml('utf-8')) + dset.write_xml_header(header.toxml('utf-8')) # Synthesize the k-space data - Ktrue = transform.transform_image_to_kspace(coil_images,(1,2)) + Ktrue = transform.transform_image_to_kspace(coil_images, (1, 2)) # Create an acquistion and reuse it acq = ismrmrd.Acquisition() @@ -124,41 +127,44 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, rep # Initialize an acquisition counter counter = 0 - + # Write out a few noise scans for n in range(32): - noise = noise_level * (np.random.randn(coils, nkx) + 1j * np.random.randn(coils, nkx)) + noise = noise_level * \ + (np.random.randn(coils, nkx) + 1j * np.random.randn(coils, nkx)) # here's where we would make the noise correlated acq.scan_counter = counter acq.clearAllFlags() acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) acq.data[:] = noise dset.append_acquisition(acq) - counter += 1 # increment the scan counter - + counter += 1 # increment the scan counter + # Loop over the repetitions, add noise and write to disk # simulating a T-SENSE type scan for rep in range(repetitions): - noise = noise_level * (np.random.randn(coils, nky, nkx) + 1j * np.random.randn(coils, nky, nkx)) + noise = noise_level * \ + (np.random.randn(coils, nky, nkx) + + 1j * np.random.randn(coils, nky, nkx)) # here's where we would make the noise correlated K = Ktrue + noise acq.idx.repetition = rep for acc in range(acceleration): - for line in np.arange(acc,nky,acceleration): + for line in np.arange(acc, nky, acceleration): # set some fields in the header acq.scan_counter = counter acq.idx.kspace_encode_step_1 = line acq.clearAllFlags() if line == 0: - acq.setFlag(ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1) - acq.setFlag(ismrmrd.ACQ_FIRST_IN_SLICE) - acq.setFlag(ismrmrd.ACQ_FIRST_IN_REPETITION) + acq.setFlag(ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1) + acq.setFlag(ismrmrd.ACQ_FIRST_IN_SLICE) + acq.setFlag(ismrmrd.ACQ_FIRST_IN_REPETITION) elif line == nky - 1: - acq.setFlag(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1) - acq.setFlag(ismrmrd.ACQ_LAST_IN_SLICE) - acq.setFlag(ismrmrd.ACQ_LAST_IN_REPETITION) + acq.setFlag(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1) + acq.setFlag(ismrmrd.ACQ_LAST_IN_SLICE) + acq.setFlag(ismrmrd.ACQ_LAST_IN_REPETITION) # set the data and append - acq.data[:] = K[:,line,:] + acq.data[:] = K[:, line, :] dset.append_acquisition(acq) counter += 1 @@ -167,22 +173,27 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, rep def main(): - parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-o', '--output', help='output filename') - parser.add_argument('-m', '--matrix-size', type=int, dest='matrix_size', help='k-space matrix size') + parser.add_argument('-m', '--matrix-size', type=int, dest='matrix_size', + help='k-space matrix size') parser.add_argument('-c', '--coils', type=int, help='number of coils') parser.add_argument('-s', '--oversampling', type=int, help='oversampling') - parser.add_argument('-r', '--repetitions', type=int, help='number of repetitions') + parser.add_argument('-r', '--repetitions', type=int, + help='number of repetitions') parser.add_argument('-a', '--acceleration', type=int, help='acceleration') - parser.add_argument('-n', '--noise-level', type=float, dest='noise_level', help='noise level') + parser.add_argument('-n', '--noise-level', type=float, dest='noise_level', + help='noise level') parser.set_defaults(output='testdata.h5', matrix_size=256, coils=8, - oversampling=2, repetitions=1, acceleration=1, noise_level=0.05) + oversampling=2, repetitions=1, acceleration=1, + noise_level=0.05) args = parser.parse_args() create(args.output, args.matrix_size, args.coils, args.oversampling, - args.repetitions, args.acceleration, args.noise_level) + args.repetitions, args.acceleration, args.noise_level) if __name__ == "__main__": main() diff --git a/ismrmrdtools/__init__.py b/ismrmrdtools/__init__.py index cc78936..da952cb 100644 --- a/ismrmrdtools/__init__.py +++ b/ismrmrdtools/__init__.py @@ -2,4 +2,5 @@ ISMRMRD Python Tools """ -__all__ = ["sense", "transform", "show", "simulation", "coils", "grappa", "ndarray_io"] +__all__ = ["sense", "transform", "show", "simulation", "coils", "grappa" + "ndarray_io"] diff --git a/ismrmrdtools/coils.py b/ismrmrdtools/coils.py index 8b3785a..515d938 100644 --- a/ismrmrdtools/coils.py +++ b/ismrmrdtools/coils.py @@ -17,7 +17,6 @@ def calculate_prewhitening(noise, scale_factor=1.0): :returns w: Prewhitening matrix, ``[coil, coil]``, w*data is prewhitened ''' - noise_int = noise.reshape((noise.shape[0], noise.size/noise.shape[0])) M = float(noise_int.shape[1]) dmtx = (1/(M-1))*np.asmatrix(noise_int)*np.asmatrix(noise_int).H @@ -25,7 +24,8 @@ def calculate_prewhitening(noise, scale_factor=1.0): dmtx = dmtx*np.sqrt(2)*np.sqrt(scale_factor) return dmtx -def apply_prewhitening(data,dmtx): + +def apply_prewhitening(data, dmtx): '''Apply the noise prewhitening matrix :param noise: Input noise data (array or matrix), ``[coil, ...]`` @@ -35,20 +35,22 @@ def apply_prewhitening(data,dmtx): ''' s = data.shape - return np.asarray(np.asmatrix(dmtx)*np.asmatrix(data.reshape(data.shape[0],data.size/data.shape[0]))).reshape(s) + return np.asarray( + np.asmatrix(dmtx) * np.asmatrix( + data.reshape(data.shape[0], data.size/data.shape[0]))).reshape(s) def calculate_csm_walsh(img, smoothing=5, niter=3): '''Calculates the coil sensitivities for 2D data using an iterative version of the Walsh method :param img: Input images, ``[coil, y, x]`` - :param smoothing: Smoothing block size (default ``5``) + :param smoothing: Smoothing +block size (default ``5``) :parma niter: Number of iterations for the eigenvector power method (default ``3``) :returns csm: Relative coil sensitivity maps, ``[coil, y, x]`` :returns rho: Total power in the estimated coils maps, ``[y, x]`` ''' - assert img.ndim == 3, "Coil sensitivity map must have exactly 3 dimensions" ncoils = 
img.shape[0] @@ -56,36 +58,35 @@ def calculate_csm_walsh(img, smoothing=5, niter=3): nx = img.shape[2] # Compute the sample covariance pointwise - Rs = np.zeros((ncoils,ncoils,ny,nx),dtype=img.dtype) + Rs = np.zeros((ncoils, ncoils, ny, nx), dtype=img.dtype) for p in range(ncoils): for q in range(ncoils): - Rs[p,q,:,:] = img[p,:,:] * np.conj(img[q,:,:]) + Rs[p, q, :, :] = img[p, :, :] * np.conj(img[q, :, :]) # Smooth the covariance for p in range(ncoils): for q in range(ncoils): - Rs[p,q] = smooth(Rs[p,q,:,:], smoothing) + Rs[p, q] = smooth(Rs[p, q, :, :], smoothing) # At each point in the image, find the dominant eigenvector # and corresponding eigenvalue of the signal covariance # matrix using the power method rho = np.zeros((ny, nx)) - csm = np.zeros((ncoils, ny, nx),dtype=img.dtype) + csm = np.zeros((ncoils, ny, nx), dtype=img.dtype) for y in range(ny): for x in range(nx): - R = Rs[:,:,y,x] - v = np.sum(R,axis=0) + R = Rs[:, :, y, x] + v = np.sum(R, axis=0) lam = np.linalg.norm(v) v = v/lam for iter in range(niter): - v = np.dot(R,v) + v = np.dot(R, v) lam = np.linalg.norm(v) v = v/lam - rho[y,x] = lam - csm[:,y,x] = v - + rho[y, x] = lam + csm[:, y, x] = v return (csm, rho) @@ -234,8 +235,8 @@ def smooth(img, box=5): t_real = np.zeros(img.shape) t_imag = np.zeros(img.shape) - ndimage.filters.uniform_filter(img.real,size=box,output=t_real) - ndimage.filters.uniform_filter(img.imag,size=box,output=t_imag) + ndimage.filters.uniform_filter(img.real, size=box, output=t_real) + ndimage.filters.uniform_filter(img.imag, size=box, output=t_imag) simg = t_real + 1j*t_imag diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 65d572c..e20ac90 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -1,8 +1,11 @@ import numpy as np -from numpy.fft import fftshift, ifftshift,ifftn +from numpy.fft import fftshift, ifftshift, ifftn import coils -def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4,5), data_mask=None, csm=None, regularization_factor=0.001, target_data=None): + +def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), + data_mask=None, csm=None, + regularization_factor=0.001, target_data=None): '''Calculates unmixing coefficients for a 2D image using a GRAPPA algorithm :param source_data: k-space source data ``[coils, y, x]`` @@ -14,7 +17,7 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4,5), data_m - 0 = no regularization - set higher for more aggressive regularization. 
:param target_data: If target data differs from source data (defaults to source_data) - + :returns unmix: Image unmixing coefficients for a single ``x`` location, ``[coil, y, x]`` :returns gmap: Noise enhancement map, ``[y, x]`` ''' @@ -23,61 +26,76 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4,5), data_m ny = source_data.shape[1] nc_source = source_data.shape[0] - if target_data is None: target_data = source_data - + if data_mask is None: data_mask = np.ones((ny, nx)) - + nc_target = target_data.shape[0] - + if csm is None: - #Assume calibration data is in the middle - f = np.asarray(np.asmatrix(np.hamming(np.max(np.sum(data_mask,0)))).T * np.asmatrix(np.hamming(np.max(np.sum(data_mask,1))))) - fmask = np.zeros((source_data.shape[1],source_data.shape[2]),dtype=np.complex64) - idx = np.argwhere(data_mask==1) - fmask[idx[:,0],idx[:,1]] = f.reshape(idx.shape[0]) - fmask = np.tile(fmask[None,:,:],(nc_source,1,1)) - csm = fftshift(ifftn(ifftshift(source_data * fmask, axes=(1,2)), axes=(1,2)), axes=(1,2)) - (csm,rho) = coils.calculate_csm_walsh(csm) - - - kernel = np.zeros((nc_target,nc_source,kernel_size[0]*acc_factor,kernel_size[1]),dtype=np.complex64) + # Assume calibration data is in the middle + f = np.asarray(np.asmatrix(np.hamming(np.max( + np.sum(data_mask, 0)))).T * np.asmatrix( + np.hamming(np.max(np.sum(data_mask, 1))))) + fmask = np.zeros( + (source_data.shape[1], source_data.shape[2]), dtype=np.complex64) + idx = np.argwhere(data_mask == 1) + fmask[idx[:, 0], idx[:, 1]] = f.reshape(idx.shape[0]) + fmask = np.tile(fmask[None, :, :], (nc_source, 1, 1)) + csm = fftshift( + ifftn( + ifftshift(source_data * fmask, axes=(1, 2)), + axes=(1, 2)), + axes=(1, 2)) + (csm, rho) = coils.calculate_csm_walsh(csm) + + kernel = np.zeros((nc_target, nc_source, kernel_size[0]*acc_factor, + kernel_size[1]), dtype=np.complex64) sampled_indices = np.nonzero(data_mask) - kx_cal = (sampled_indices[1][0],sampled_indices[1][-1]) - ky_cal = (sampled_indices[0][0],sampled_indices[0][-1]) - - for s in range(0,acc_factor): - kernel_mask = np.zeros((kernel_size[0]*acc_factor, kernel_size[1]),dtype=np.int8) - kernel_mask[s:kernel_mask.shape[0]:acc_factor,:] = 1 - s_data = source_data[:,ky_cal[0]:ky_cal[1],kx_cal[0]:kx_cal[1]] - t_data = target_data[:,ky_cal[0]:ky_cal[1],kx_cal[0]:kx_cal[1]] - k = estimate_convolution_kernel(s_data,kernel_mask,regularization_factor=regularization_factor,target_data=t_data) + kx_cal = (sampled_indices[1][0], sampled_indices[1][-1]) + ky_cal = (sampled_indices[0][0], sampled_indices[0][-1]) + + for s in range(0, acc_factor): + kernel_mask = np.zeros( + (kernel_size[0]*acc_factor, kernel_size[1]), dtype=np.int8) + kernel_mask[s:kernel_mask.shape[0]:acc_factor, :] = 1 + s_data = source_data[:, ky_cal[0]:ky_cal[1], kx_cal[0]:kx_cal[1]] + t_data = target_data[:, ky_cal[0]:ky_cal[1], kx_cal[0]:kx_cal[1]] + k = estimate_convolution_kernel( + s_data, kernel_mask, regularization_factor=regularization_factor, + target_data=t_data) kernel = kernel + k - #return kernel - - kernel = kernel[:,:,::-1,::-1] #flip kernel in preparation for convolution - - csm_ss = np.sum(csm * np.conj(csm),0) + # return kernel + + # flip kernel in preparation for convolution + kernel = kernel[:, :, ::-1, ::-1] + + csm_ss = np.sum(csm * np.conj(csm), 0) csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32') - - unmix = np.zeros(source_data.shape,dtype=np.complex64) - - for c in range(0,nc_target): - kernel_pad = _pad_kernel(kernel[c,:,:,:],unmix.shape) - kernel_pad = 
fftshift(ifftn(ifftshift(kernel_pad, axes=(1,2)), axes=(1,2)), axes=(1,2)) + + unmix = np.zeros(source_data.shape, dtype=np.complex64) + + for c in range(0, nc_target): + kernel_pad = _pad_kernel(kernel[c, :, :, :], unmix.shape) + kernel_pad = fftshift( + ifftn(ifftshift(kernel_pad, axes=(1, 2)), axes=(1, 2)), axes=(1, 2)) kernel_pad *= unmix.shape[1]*unmix.shape[2] - unmix = unmix + (kernel_pad * np.tile(np.conj(csm[c,:,:]) /csm_ss,(nc_source,1,1))) + unmix = unmix + \ + (kernel_pad * + np.tile(np.conj(csm[c, :, :]) / csm_ss, (nc_source, 1, 1))) unmix /= acc_factor - gmap = np.squeeze(np.sqrt(np.sum(abs(unmix) ** 2, 0))) * np.squeeze(np.sqrt(np.sum(abs(csm) ** 2, 0))) - - - return (unmix.astype('complex64'),gmap.astype('float32')) - -def estimate_convolution_kernel(source_data, kernel_mask, regularization_factor=0.001, target_data=None): + gmap = np.squeeze(np.sqrt(np.sum(abs(unmix) ** 2, 0))) * \ + np.squeeze(np.sqrt(np.sum(abs(csm) ** 2, 0))) + + return (unmix.astype('complex64'), gmap.astype('float32')) + + +def estimate_convolution_kernel(source_data, kernel_mask, + regularization_factor=0.001, target_data=None): '''Estimates a 2D k-space convolution kernel (as used in GRAPPA or SPIRiT) :param source_data: k-space source data ``[coils, y, x]`` @@ -92,60 +110,74 @@ def estimate_convolution_kernel(source_data, kernel_mask, regularization_factor= :returns gmap: Noise enhancement map, ``[y, x]`` ''' - if target_data is None: target_data = source_data assert source_data.ndim == 3, "Source data must have exactly 3 dimensions" assert target_data.ndim == 3, "Targe data must have exactly 3 dimensions" assert kernel_mask.ndim == 2, "Kernel mask must have exactly 2 dimensions" - + nc_source = source_data.shape[0] nc_target = target_data.shape[0] - offsets = np.argwhere(kernel_mask==1) - offsets[:,0] -= kernel_mask.shape[0]/2 - offsets[:,1] -= kernel_mask.shape[1]/2 - ky_range = (0-np.min(offsets[:,0]),source_data.shape[1]-np.max(offsets[:,0])) - kx_range = (0-np.min(offsets[:,1]),source_data.shape[2]-np.max(offsets[:,1])) - + offsets = np.argwhere(kernel_mask == 1) + offsets[:, 0] -= kernel_mask.shape[0]/2 + offsets[:, 1] -= kernel_mask.shape[1]/2 + ky_range = ( + 0-np.min(offsets[:, 0]), source_data.shape[1]-np.max(offsets[:, 0])) + kx_range = ( + 0-np.min(offsets[:, 1]), source_data.shape[2]-np.max(offsets[:, 1])) + equations = (ky_range[1]-ky_range[0])*(kx_range[1]-kx_range[0]) - unknowns = offsets.shape[0]*nc_source - - - A = np.asmatrix(np.zeros((equations, unknowns),dtype=np.complex128)) + unknowns = offsets.shape[0]*nc_source + + A = np.asmatrix(np.zeros((equations, unknowns), dtype=np.complex128)) b = np.asmatrix(np.zeros((equations, nc_target), dtype=np.complex128)) - - for sc in range(0,nc_source): - for p in range(0,offsets.shape[0]): - A[:,sc*offsets.shape[0]+p] = source_data[sc,(ky_range[0]+offsets[p,0]):(ky_range[1]+offsets[p,0]),(kx_range[0]+offsets[p,1]):(kx_range[1]+offsets[p,1])].reshape((equations,1)) - for tc in range(0,nc_target): - b[:,tc] = target_data[tc,ky_range[0]:ky_range[1],kx_range[0]:kx_range[1]].reshape((equations,1)) - - + for sc in range(0, nc_source): + for p in range(0, offsets.shape[0]): + yslice = slice(ky_range[0]+offsets[p, 0], + ky_range[1]+offsets[p, 0]) + xslice = slice(kx_range[0]+offsets[p, 1], + kx_range[1]+offsets[p, 1]) + A[:, sc*offsets.shape[0]+p] = source_data[ + sc, yslice, xslice].reshape((equations, 1)) + for tc in range(0, nc_target): + b[:, tc] = target_data[ + tc, ky_range[0]:ky_range[1], + kx_range[0]:kx_range[1]].reshape((equations, 1)) + 
if A.shape[0] < 3*A.shape[1]: - print("Warning: number of samples in calibration data might be insufficient") - - - S = np.linalg.svd(A,compute_uv=False) - A_inv = np.dot(np.linalg.pinv(np.dot(A.H,A) + np.eye(A.shape[1])*(regularization_factor*np.max(np.abs(S)))**2),A.H) - x = np.dot(A_inv,b) - - offsets = np.argwhere(kernel_mask==1) - kernel = np.zeros((nc_target,nc_source, kernel_mask.shape[0],kernel_mask.shape[1]),dtype=np.complex64) - for tc in range(0,nc_target): - for sc in range(0,nc_source): - for p in range(0,offsets.shape[0]): - kernel[tc,sc,offsets[p,0],offsets[p,1]] = x[sc*offsets.shape[0]+p,tc] - + print("Warning: number of samples in calibration data might be " + "insufficient") + + S = np.linalg.svd(A, compute_uv=False) + A_inv = np.dot(np.linalg.pinv(np.dot(A.H, A) + np.eye(A.shape[1]) * + (regularization_factor*np.max(np.abs(S)))**2), A.H) + x = np.dot(A_inv, b) + + offsets = np.argwhere(kernel_mask == 1) + kernel = np.zeros((nc_target, nc_source, kernel_mask.shape[ + 0], kernel_mask.shape[1]), dtype=np.complex64) + for tc in range(0, nc_target): + for sc in range(0, nc_source): + for p in range(0, offsets.shape[0]): + kernel[tc, sc, offsets[p, 0], offsets[p, 1]] = x[ + sc*offsets.shape[0]+p, tc] + return kernel -def _pad_kernel(gkernel,padded_shape): - assert gkernel.ndim == 3, "Kernel padding routine must take 3 dimensional kernel" - padded_kernel = np.zeros(padded_shape,dtype=np.complex64) + +def _pad_kernel(gkernel, padded_shape): + if gkernel.ndim != 3: + raise ValueError("Kernel padding routine must take 3 dimensional " + "kernel") + padded_kernel = np.zeros(padded_shape, dtype=np.complex64) padding = np.asarray(padded_shape)-np.asarray(gkernel.shape) padding_before = (padding+1)/2 - padded_kernel[padding_before[0]:(padding_before[0]+gkernel.shape[0]),padding_before[1]:(padding_before[1]+gkernel.shape[1]),padding_before[2]:(padding_before[2]+gkernel.shape[2])] = gkernel + pad_slices = [ + slice(padding_before[0], padding_before[0]+gkernel.shape[0]), + slice(padding_before[1], padding_before[1]+gkernel.shape[1]), + slice(padding_before[2], padding_before[2]+gkernel.shape[2])] + padded_kernel[pad_slices] = gkernel return padded_kernel - diff --git a/ismrmrdtools/ndarray_io.py b/ismrmrdtools/ndarray_io.py index 519d6b1..5763a70 100644 --- a/ismrmrdtools/ndarray_io.py +++ b/ismrmrdtools/ndarray_io.py @@ -1,7 +1,8 @@ import numpy as np from struct import unpack -def write_ndarray(filename,ndarray): + +def write_ndarray(filename, ndarray): '''Writes simple ndarray format. 
This format is mostly used for debugging purposes The file name extension indicates the binary data format: @@ -16,7 +17,7 @@ def write_ndarray(filename,ndarray): :param filename: Name of file containing array (extension appended automatically) :param ndarray: Numpy array ''' - + datatype = ndarray.dtype if datatype == np.dtype(np.float32): fullfilename = filename + str('.float') @@ -27,20 +28,21 @@ def write_ndarray(filename,ndarray): elif datatype == np.dtype(np.complex128): fullfilename = filename + str('.dplx') else: - raise Exception('Unsupported data type') - - f = open(fullfilename,'wb') - dims = np.zeros((ndarray.ndim+1,1),dtype=np.int32) - dims[0] = ndarray.ndim; - for d in range(0,ndarray.ndim): + raise Exception('Unsupported data type') + + f = open(fullfilename, 'wb') + dims = np.zeros((ndarray.ndim+1, 1), dtype=np.int32) + dims[0] = ndarray.ndim + for d in range(0, ndarray.ndim): dims[d+1] = ndarray.shape[ndarray.ndim-d-1] - f.write(dims.tobytes()) + f.write(dims.tobytes()) f.write(ndarray.tobytes()) + def read_ndarray(filename): '''Reads simple ndarray format. This format is mostly used for debugging purposes The file name extension indicates the binary data format: - + '*.float' indicates float32 '*.double' indicates float64 @@ -57,7 +59,7 @@ def read_ndarray(filename): if filename.endswith('.float'): datatype = np.dtype(np.float32) elif filename.endswith('.double'): - datatype = np.dtype(np.float64) + datatype = np.dtype(np.float64) elif filename.endswith('.cplx'): datatype = np.dtype(np.complex64) elif filename.endswith('.dplx'): @@ -65,18 +67,18 @@ def read_ndarray(filename): else: raise Exception('Unknown file name extension') - f = open(filename,'rb') + f = open(filename, 'rb') ndims = f.read(4) ndims = unpack(' 0: -# unmix1d[:,b:ny:nblocks] = np.linalg.pinv(A) - AHA = A.H * A; - reduced_eye = np.diag(np.abs(np.diag(AHA))>0) + # unmix1d[:,b:ny:nblocks] = np.linalg.pinv(A) + AHA = A.H * A + reduced_eye = np.diag(np.abs(np.diag(AHA)) > 0) n_alias = np.sum(reduced_eye) scaled_reg_factor = regularization_factor * np.trace(AHA)/n_alias - unmix1d[:,b:ny:nblocks] = np.linalg.pinv(AHA + (reduced_eye*scaled_reg_factor)) * A.H + unmix1d[:, b:ny:nblocks] = np.linalg.pinv( + AHA + (reduced_eye*scaled_reg_factor)) * A.H return unmix1d diff --git a/ismrmrdtools/show.py b/ismrmrdtools/show.py index 73824f8..bd5b709 100644 --- a/ismrmrdtools/show.py +++ b/ismrmrdtools/show.py @@ -6,6 +6,7 @@ import matplotlib.pyplot as plt from matplotlib.widgets import RectangleSelector + def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, cmap='jet'): """ Tiles images and displays them in a window. 
@@ -15,10 +16,12 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, :param titles: optional list of titles for each subplot :param cmap: optional colormap for all images """ - assert image_matrix.ndim in [2, 3], "image_matrix must have 2 or 3 dimensions" + assert image_matrix.ndim in [ + 2, 3], "image_matrix must have 2 or 3 dimensions" if image_matrix.ndim == 2: - image_matrix = image_matrix.reshape((1, image_matrix.shape[0], image_matrix.shape[1])) + image_matrix = image_matrix.reshape( + (1, image_matrix.shape[0], image_matrix.shape[1])) if not scale: scale = (np.min(image_matrix), np.max(image_matrix)) @@ -27,7 +30,7 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, if not tile_shape: tile_shape = (1, image_matrix.shape[0]) assert np.prod(tile_shape) >= image_matrix.shape[0],\ - "image tile rows x columns must equal the 3rd dim extent of image_matrix" + "image tile rows x columns must equal the 3rd dim extent of image_matrix" # add empty titles as necessary if len(titles) < image_matrix.shape[0]: @@ -35,7 +38,7 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, if len(titles) > 0: assert len(titles) >= image_matrix.shape[0],\ - "number of titles must equal 3rd dim extent of image_matrix" + "number of titles must equal 3rd dim extent of image_matrix" def onselect(eclick, erelease): print((eclick.xdata, eclick.ydata), (erelease.xdata, erelease.ydata)) @@ -47,7 +50,7 @@ def on_pick(event): A = im.get_array() print(A[y, x]) - selectors = [] # need to keep a reference to each selector + selectors = [] # need to keep a reference to each selector rectprops = dict(facecolor='red', edgecolor='black', alpha=0.5, fill=True) cols, rows = tile_shape fig = plt.figure() @@ -56,7 +59,8 @@ def on_pick(event): ax = fig.add_subplot(cols, rows, z+1) ax.set_title(titles[z]) ax.set_axis_off() - imgplot = ax.imshow(image_matrix[z,:,:], vmin=vmin, vmax=vmax, picker=True) + imgplot = ax.imshow( + image_matrix[z, :, :], vmin=vmin, vmax=vmax, picker=True) selectors.append(RectangleSelector(ax, onselect, rectprops=rectprops)) if colorbar is True: diff --git a/ismrmrdtools/simulation.py b/ismrmrdtools/simulation.py index 899328e..08c1630 100644 --- a/ismrmrdtools/simulation.py +++ b/ismrmrdtools/simulation.py @@ -5,52 +5,58 @@ import numpy as np from ismrmrdtools import transform + def sample_data(img_obj, csm, acc=1, ref=0, sshift=0): -#% Samples the k-space of object provided in 'img_obj' after first applying -#% coil sensitivity maps in 'csm' and Fourier transforming to k-space. -#% -#% INPUT: -#% - img_obj [x,y] : Object in image space -#% - csm [x,y,c] : Coil sensitivity maps -#% - acc scalar : Acceleration factor -#% - ref scalar : Reference lines (in center of k-space) -#% - sshift scalar : Sampling shift, i.e for undersampling, do we -#% start with line 1 or line 1+sshift? -#% -#% OUPUT: -#% - data [kx,ky,c]: Sampled data in k-space (zeros where not sampled) -#% - pat [kx,ky,c]: Sampling pattern (0 = not sampled, -#% 1 = imaging data, -#% 2 = reference data, -#% 3 = reference and imaging data) -#% -#% -#% Code made available for the ISMRM 2013 Sunrise Educational Course -#% -#% Michael S. Hansen (michael.hansen@nih.gov) -#% - - sshift = sshift%acc; - + """ + Samples the k-space of object provided in 'img_obj' after first applying + coil sensitivity maps in 'csm' and Fourier transforming to k-space. 
+ + INPUT: + - img_obj [x,y] : Object in image space + - csm [x,y,c] : Coil sensitivity maps + - acc scalar : Acceleration factor + - ref scalar : Reference lines (in center of k-space) + - sshift scalar : Sampling shift, i.e for undersampling, do we + start with line 1 or line 1+sshift? + + OUPUT: + - data [kx,ky,c]: Sampled data in k-space (zeros where not sampled) + - pat [kx,ky,c]: Sampling pattern (0 = not sampled, + 1 = imaging data, + 2 = reference data, + 3 = reference and imaging data) + + + Code made available for the ISMRM 2013 Sunrise Educational Course + + Michael S. Hansen (michael.hansen@nih.gov) + """ + + sshift = sshift % acc + assert img_obj.ndim == 2, "Only two dimensional objects supported at the moment" assert csm.ndim == 3, "csm must be a 3 dimensional array" - assert img_obj.shape[0] == csm.shape[1], "Object and csm dimension mismatch" - assert img_obj.shape[1] == csm.shape[2], "Object and csm dimension mismatch" + assert img_obj.shape[0] == csm.shape[ + 1], "Object and csm dimension mismatch" + assert img_obj.shape[1] == csm.shape[ + 2], "Object and csm dimension mismatch" - pat_img = np.zeros(img_obj.shape,dtype=np.int8) - pat_img[sshift:-1:acc,:] = 1 - pat_ref = np.zeros(img_obj.shape,dtype=np.int8) - if ref>0: - pat_ref[(0+img_obj.shape[0]/2):(ref+img_obj.shape[0]/2),:] = 2 + pat_img = np.zeros(img_obj.shape, dtype=np.int8) + pat_img[sshift:-1:acc, :] = 1 + pat_ref = np.zeros(img_obj.shape, dtype=np.int8) + if ref > 0: + pat_ref[(0+img_obj.shape[0]/2):(ref+img_obj.shape[0]/2), :] = 2 pat = pat_img + pat_ref - - coil_images = np.tile(img_obj,(csm.shape[0], 1, 1)) * csm - data = transform.transform_image_to_kspace(coil_images,dim=(1,2)) - data = data * (np.tile(pat,(csm.shape[0], 1, 1))>0).astype('float32') - return (data,pat) -def generate_birdcage_sensitivities(matrix_size = 256, number_of_coils = 8, relative_radius = 1.5, normalize=True): + coil_images = np.tile(img_obj, (csm.shape[0], 1, 1)) * csm + data = transform.transform_image_to_kspace(coil_images, dim=(1, 2)) + data = data * (np.tile(pat, (csm.shape[0], 1, 1)) > 0).astype('float32') + return (data, pat) + + +def generate_birdcage_sensitivities(matrix_size=256, number_of_coils=8, + relative_radius=1.5, normalize=True): """ Generates birdcage coil sensitivites. 
:param matrix_size: size of imaging matrix in pixels (default ``256``) @@ -61,32 +67,35 @@ def generate_birdcage_sensitivities(matrix_size = 256, number_of_coils = 8, rela Jeff Fessler's IRT package: http://web.eecs.umich.edu/~fessler/code/ """ - out = np.zeros((number_of_coils,matrix_size,matrix_size),dtype=np.complex64) - for c in range(0,number_of_coils): + out = np.zeros( + (number_of_coils, matrix_size, matrix_size), dtype=np.complex64) + for c in range(0, number_of_coils): coilx = relative_radius*np.cos(c*(2*np.pi/number_of_coils)) coily = relative_radius*np.sin(c*(2*np.pi/number_of_coils)) coil_phase = -c*(2*np.pi/number_of_coils) - for y in range(0,matrix_size): + for y in range(0, matrix_size): y_co = float(y-matrix_size/2)/float(matrix_size/2)-coily - for x in range(0,matrix_size): + for x in range(0, matrix_size): x_co = float(x-matrix_size/2)/float(matrix_size/2)-coilx rr = np.sqrt(x_co**2+y_co**2) phi = np.arctan2(x_co, -y_co) + coil_phase - out[c,y,x] = (1/rr) * np.exp(1j*phi) + out[c, y, x] = (1/rr) * np.exp(1j*phi) if normalize: - rss = np.squeeze(np.sqrt(np.sum(abs(out) ** 2, 0))) - out = out / np.tile(rss,(number_of_coils,1,1)) + rss = np.squeeze(np.sqrt(np.sum(abs(out) ** 2, 0))) + out = out / np.tile(rss, (number_of_coils, 1, 1)) return out -def phantom (matrix_size = 256, phantom_type = 'Modified Shepp-Logan', ellipses = None): +def phantom(matrix_size=256, phantom_type='Modified Shepp-Logan', + ellipses=None): """ Create a Shepp-Logan or modified Shepp-Logan phantom:: - phantom (n = 256, phantom_type = 'Modified Shepp-Logan', ellipses = None) + phantom (n = 256, phantom_type = 'Modified Shepp-Logan', + ellipses = None) :param matrix_size: size of imaging matrix in pixels (default 256) @@ -130,50 +139,52 @@ def phantom (matrix_size = 256, phantom_type = 'Modified Shepp-Logan', ellipses """ if (ellipses is None): - ellipses = _select_phantom (phantom_type) - elif (np.size (ellipses, 1) != 6): - raise AssertionError ("Wrong number of columns in user phantom") + ellipses = _select_phantom(phantom_type) + elif (np.size(ellipses, 1) != 6): + raise AssertionError("Wrong number of columns in user phantom") - ph = np.zeros ((matrix_size, matrix_size),dtype=np.float32) + ph = np.zeros((matrix_size, matrix_size), dtype=np.float32) # Create the pixel grid ygrid, xgrid = np.mgrid[-1:1:(1j*matrix_size), -1:1:(1j*matrix_size)] for ellip in ellipses: - I = ellip [0] - a2 = ellip [1]**2 - b2 = ellip [2]**2 - x0 = ellip [3] - y0 = ellip [4] - phi = ellip [5] * np.pi / 180 # Rotation angle in radians + I = ellip[0] + a2 = ellip[1]**2 + b2 = ellip[2]**2 + x0 = ellip[3] + y0 = ellip[4] + phi = ellip[5] * np.pi / 180 # Rotation angle in radians # Create the offset x and y values for the grid x = xgrid - x0 y = ygrid - y0 - cos_p = np.cos (phi) - sin_p = np.sin (phi) + cos_p = np.cos(phi) + sin_p = np.sin(phi) # Find the pixels within the ellipse locs = (((x * cos_p + y * sin_p)**2) / a2 - + ((y * cos_p - x * sin_p)**2) / b2) <= 1 + + ((y * cos_p - x * sin_p)**2) / b2) <= 1 # Add the ellipse intensity to those pixels - ph [locs] += I + ph[locs] += I return ph -def _select_phantom (name): - if (name.lower () == 'shepp-logan'): - e = _shepp_logan () - elif (name.lower () == 'modified shepp-logan'): - e = _mod_shepp_logan () + +def _select_phantom(name): + if (name.lower() == 'shepp-logan'): + e = _shepp_logan() + elif (name.lower() == 'modified shepp-logan'): + e = _mod_shepp_logan() else: - raise ValueError ("Unknown phantom type: %s" % name) + raise ValueError("Unknown phantom type: %s" % 
name) return e + def _shepp_logan (): - # Standard head phantom, taken from Shepp & Logan + """ Standard head phantom, taken from Shepp & Logan. """ return [[ 2, .69, .92, 0, 0, 0], [-.98, .6624, .8740, 0, -.0184, 0], [-.02, .1100, .3100, .22, 0, -18], @@ -185,9 +196,11 @@ def _shepp_logan (): [ .01, .0230, .0230, 0, -.606, 0], [ .01, .0230, .0460, .06, -.605, 0]] + def _mod_shepp_logan (): - # Modified version of Shepp & Logan's head phantom, - # adjusted to improve contrast. Taken from Toft. + """ Modified version of Shepp & Logan's head phantom, adjusted to improve + contrast. Taken from Toft. + """ return [[ 1, .69, .92, 0, 0, 0], [-.80, .6624, .8740, 0, -.0184, 0], [-.20, .1100, .3100, .22, 0, -18], diff --git a/ismrmrdtools/transform.py b/ismrmrdtools/transform.py index d414247..611d819 100644 --- a/ismrmrdtools/transform.py +++ b/ismrmrdtools/transform.py @@ -4,6 +4,7 @@ import numpy as np from numpy.fft import fftshift, ifftshift, fftn, ifftn + def transform_kspace_to_image(k, dim=None, img_shape=None): """ Computes the Fourier transform from k-space to image space along a given or all dimensions @@ -16,7 +17,8 @@ def transform_kspace_to_image(k, dim=None, img_shape=None): if not dim: dim = range(k.ndim) - img = fftshift(ifftn(ifftshift(k, axes=dim), s=img_shape, axes=dim), axes=dim) + img = fftshift( + ifftn(ifftshift(k, axes=dim), s=img_shape, axes=dim), axes=dim) img *= np.sqrt(np.prod(np.take(img.shape, dim))) return img diff --git a/parallel_imaging_demo.py b/parallel_imaging_demo.py index cc630a5..80858b8 100644 --- a/parallel_imaging_demo.py +++ b/parallel_imaging_demo.py @@ -1,33 +1,38 @@ # -*- coding: utf-8 -*- #%% -#Basic setup +# Basic setup import numpy as np import scipy as sp -from ismrmrdtools import sense, grappa, show, simulation, transform,coils +from ismrmrdtools import sense, grappa, show, simulation, transform, coils #%% -#import some data +# import some data exercise_data = sp.io.loadmat('hansen_exercises2.mat') csm = np.transpose(exercise_data['smaps']) pat = np.transpose(exercise_data['sp']) data = np.transpose(exercise_data['data']) -kspace = np.logical_or(pat==1,pat==3).astype('float32')*(data) +kspace = np.logical_or(pat == 1, pat == 3).astype('float32')*(data) acc_factor = 4 -alias_img = transform.transform_kspace_to_image(kspace,dim=(1,2)) * np.sqrt(acc_factor) +alias_img = transform.transform_kspace_to_image( + kspace, dim=(1, 2)) * np.sqrt(acc_factor) show.imshow(abs(alias_img)) -(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(4,5)) -#(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1) -show.imshow(abs(gmap_grappa),colorbar=True) -recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa,0)) -show.imshow(abs(recon_grappa),colorbar=True) +(unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( + data, acc_factor, data_mask=pat > 1, csm=csm, kernel_size=(4, 5)) +# (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( +# data, acc_factor, data_mask=pat>1) +show.imshow(abs(gmap_grappa), colorbar=True) +recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) +show.imshow(abs(recon_grappa), colorbar=True) -sp.io.savemat('tmp_data.mat',{'pat_py': pat,'data_py': data,'csm_py': csm,'alias_img_py':alias_img,'unmix_grappa_py':unmix_grappa}) +sp.io.savemat('tmp_data.mat', {'pat_py': pat, 'data_py': data, 'csm_py': csm, + 'alias_img_py': alias_img, + 'unmix_grappa_py': unmix_grappa}) #%% -#Reload some modules +# Reload some 
modules reload(show) reload(sense) reload(grappa) @@ -40,67 +45,74 @@ matrix_size = 256 csm = simulation.generate_birdcage_sensitivities(matrix_size) phan = simulation.phantom(matrix_size) -coil_images = np.tile(phan,(8, 1, 1)) * csm -show.imshow(abs(coil_images),tile_shape=(4,2),colorbar=True) +coil_images = np.tile(phan, (8, 1, 1)) * csm +show.imshow(abs(coil_images), tile_shape=(4, 2), colorbar=True) #%% -#Undersample +# Undersample reload(simulation) acc_factor = 2 ref_lines = 16 -(data,pat) = simulation.sample_data(phan,csm,acc_factor,ref_lines) +(data, pat) = simulation.sample_data(phan, csm, acc_factor, ref_lines) #%% -#Add noise -noise = np.random.standard_normal(data.shape) + 1j*np.random.standard_normal(data.shape) +# Add noise +noise = np.random.standard_normal( + data.shape) + 1j*np.random.standard_normal(data.shape) noise = (5.0/matrix_size)*noise -kspace = np.logical_or(pat==1,pat==3).astype('float32')*(data + noise) -data = (pat>0).astype('float32')*(data + noise) +kspace = np.logical_or(pat == 1, pat == 3).astype('float32')*(data + noise) +data = (pat > 0).astype('float32')*(data + noise) #%% -#Calculate the noise prewhitening matrix +# Calculate the noise prewhitening matrix dmtx = coils.calculate_prewhitening(noise) #%% # Apply prewhitening -kspace = coils.apply_prewhitening(kspace, dmtx) -data = coils.apply_prewhitening(data, dmtx) +kspace = coils.apply_prewhitening(kspace, dmtx) +data = coils.apply_prewhitening(data, dmtx) #%% -#Reconstruct aliased images -alias_img = transform.transform_kspace_to_image(kspace,dim=(1,2)) * np.sqrt(acc_factor) +# Reconstruct aliased images +alias_img = transform.transform_kspace_to_image( + kspace, dim=(1, 2)) * np.sqrt(acc_factor) show.imshow(abs(alias_img)) #%% reload(sense) -(unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor,csm) -show.imshow(abs(gmap_sense),colorbar=True) -recon_sense = np.squeeze(np.sum(alias_img * unmix_sense,0)) -show.imshow(abs(recon_sense),colorbar=True) +(unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor, csm) +show.imshow(abs(gmap_sense), colorbar=True) +recon_sense = np.squeeze(np.sum(alias_img * unmix_sense, 0)) +show.imshow(abs(recon_sense), colorbar=True) #%% reload(grappa) -#(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(2,5)) -(unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing(data, acc_factor, data_mask=pat>1,kernel_size=(2,5)) -show.imshow(abs(gmap_grappa),colorbar=True) -recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa,0)) -show.imshow(abs(recon_grappa),colorbar=True) - -#%% -#Pseudo replica example +# (unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing( +# data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(2,5)) +(unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( + data, acc_factor, data_mask=pat > 1, kernel_size=(2, 5)) +show.imshow(abs(gmap_grappa), colorbar=True) +recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) +show.imshow(abs(recon_grappa), colorbar=True) + +#%% +# Pseudo replica example reps = 255 -reps_sense = np.zeros((reps,recon_grappa.shape[0],recon_grappa.shape[1]),dtype=np.complex64) -reps_grappa = np.zeros((reps,recon_grappa.shape[0],recon_grappa.shape[1]),dtype=np.complex64) -for r in range(0,reps): - noise_r = np.random.standard_normal(kspace.shape) + 1j*np.random.standard_normal(kspace.shape) - kspace_r = np.logical_or(pat==1,pat==3).astype('float32')*(kspace + noise_r) - alias_img_r = 
transform.transform_kspace_to_image(kspace_r,dim=(1,2)) * np.sqrt(acc_factor) - reps_sense[r,:,:] = np.squeeze(np.sum(alias_img_r * unmix_sense,0)) - reps_grappa[r,:,:] = np.squeeze(np.sum(alias_img_r * unmix_grappa,0)) - -std_sense = np.std(np.real(reps_sense),0) -show.imshow(abs(std_sense),colorbar=True) -std_grappa = np.std(np.real(reps_grappa),0) -show.imshow(abs(std_grappa),colorbar=True) +reps_sense = np.zeros((reps, ) + recon_grappa.shape[:2], dtype=np.complex64) +reps_grappa = np.zeros((reps, ) + recon_grappa.shape[:2], dtype=np.complex64) +for r in range(0, reps): + noise_r = np.random.standard_normal( + kspace.shape) + 1j*np.random.standard_normal(kspace.shape) + kspace_r = np.logical_or(pat == 1, pat == 3).astype('float32') * \ + (kspace + noise_r) + alias_img_r = transform.transform_kspace_to_image( + kspace_r, dim=(1, 2)) * np.sqrt(acc_factor) + reps_sense[r, :, :] = np.squeeze(np.sum(alias_img_r * unmix_sense, 0)) + reps_grappa[r, :, :] = np.squeeze(np.sum(alias_img_r * unmix_grappa, 0)) + +std_sense = np.std(np.real(reps_sense), 0) +show.imshow(abs(std_sense), colorbar=True) +std_grappa = np.std(np.real(reps_grappa), 0) +show.imshow(abs(std_grappa), colorbar=True) diff --git a/recon_ismrmrd_dataset.py b/recon_ismrmrd_dataset.py index 65c0fec..2846379 100644 --- a/recon_ismrmrd_dataset.py +++ b/recon_ismrmrd_dataset.py @@ -51,10 +51,10 @@ ncontrasts = 1 # TODO loop through the acquisitions looking for noise scans -firstacq=0 +firstacq = 0 for acqnum in range(dset.number_of_acquisitions()): acq = dset.read_acquisition(acqnum) - + # TODO: Currently ignoring noise scans if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT): print("Found noise scan at acq ", acqnum) @@ -66,10 +66,11 @@ # Initialiaze a storage array -all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64) +all_data = np.zeros( + (nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64) # Loop through the rest of the acquisitions and stuff -for acqnum in range(firstacq,dset.number_of_acquisitions()): +for acqnum in range(firstacq, dset.number_of_acquisitions()): acq = dset.read_acquisition(acqnum) # TODO: this is where we would apply noise pre-whitening @@ -79,12 +80,12 @@ xline = transform.transform_kspace_to_image(acq.data, [1]) x0 = (eNx - rNx) / 2 x1 = (eNx - rNx) / 2 + rNx - xline = xline[:,x0:x1] - acq.resize(rNx,acq.active_channels,acq.trajectory_dimensions) + xline = xline[:, x0:x1] + acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions) acq.center_sample = rNx/2 # need to use the [:] notation here to fill the data acq.data[:] = transform.transform_image_to_kspace(xline, [1]) - + # Stuff into the buffer rep = acq.idx.repetition contrast = acq.idx.contrast @@ -94,28 +95,31 @@ all_data[rep, contrast, slice, :, z, y, :] = acq.data # Reconstruct images -images = np.zeros((nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.float32) +images = np.zeros( + (nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.float32) for rep in range(nreps): for contrast in range(ncontrasts): for slice in range(nslices): # FFT - if eNz>1: - #3D - im = transform.transform_kspace_to_image(all_data[rep,contrast,slice,:,:,:,:], [1,2,3]) + if eNz > 1: + # 3D + im = transform.transform_kspace_to_image( + all_data[rep, contrast, slice, :, :, :, :], [1, 2, 3]) else: - #2D - im = transform.transform_kspace_to_image(all_data[rep,contrast,slice,:,0,:,:], [1,2]) + # 2D + im = transform.transform_kspace_to_image( + all_data[rep, contrast, slice, :, 0, :, :], [1, 2]) # Sum of squares 
im = np.sqrt(np.sum(np.abs(im) ** 2, 0)) - + # Stuff into the output - if eNz>1: - #3D - images[rep,contrast,slice,:,:,:] = im + if eNz > 1: + # 3D + images[rep, contrast, slice, :, :, :] = im else: - #2D - images[rep,contrast,slice,0,:,:] = im + # 2D + images[rep, contrast, slice, 0, :, :] = im # Show an image -show.imshow(np.squeeze(images[0,0,0,:,:,:])) +show.imshow(np.squeeze(images[0, 0, 0, :, :, :])) diff --git a/recon_multi_reps.py b/recon_multi_reps.py index 8e6e7c5..5c032ed 100644 --- a/recon_multi_reps.py +++ b/recon_multi_reps.py @@ -10,24 +10,22 @@ import ismrmrd import ismrmrd.xsd import numpy as np -import scipy as sp -from ismrmrdtools import show, transform, coils, grappa, sense +from ismrmrdtools import show, transform, coils, grappa #%% -#Convert data from siemens file with +# Convert data from siemens file with # siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 1 -o data_reps_noise.h5 # siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 2 -o data_reps_data.h5 # Data can be found in Gadgetron integration test datasets -#filename_noise = 'data_reps_noise.h5' -#filename_data = 'data_reps_data.h5' +# filename_noise = 'data_reps_noise.h5' +# filename_data = 'data_reps_data.h5' -filename_noise = 'tpat3_noise.h5' +filename_noise = 'tpat3_noise.h5' filename_data = 'tpat3_data.h5' - #%% # Read the noise data if not os.path.isfile(filename_noise): @@ -43,18 +41,19 @@ num_coils = a.active_channels noise_dwell_time = a.sample_time_us -noise = np.zeros((num_coils,noise_reps*noise_samples),dtype=np.complex64) +noise = np.zeros((num_coils, noise_reps*noise_samples), dtype=np.complex64) for acqnum in range(noise_reps): acq = noise_dset.read_acquisition(acqnum) - + # TODO: Currently ignoring noise scans if not acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT): - raise Exception("Errror: non noise scan found in noise calibration") + raise Exception("Errror: non noise scan found in noise calibration") + + noise[:, acqnum*noise_samples:acqnum * + noise_samples+noise_samples] = acq.data - noise[:,acqnum*noise_samples:acqnum*noise_samples+noise_samples] = acq.data - noise = noise.astype('complex64') - + #%% Read the actual data # Read the noise data if not os.path.isfile(filename_data): @@ -81,7 +80,7 @@ rFOVy = enc.reconSpace.fieldOfView_mm.y rFOVz = enc.reconSpace.fieldOfView_mm.z -#Parallel imaging factor +# Parallel imaging factor acc_factor = enc.parallelImaging.accelerationFactor.kspace_encoding_step_1 # Number of Slices, Reps, Contrasts, etc. @@ -100,12 +99,12 @@ ncontrasts = enc.encodingLimits.contrast.maximum + 1 else: ncontrasts = 1 - -# In case there are noise scans in the actual dataset, we will skip them. -firstacq=0 + +# In case there are noise scans in the actual dataset, we will skip them. 
+firstacq = 0 for acqnum in range(dset.number_of_acquisitions()): acq = dset.read_acquisition(acqnum) - + if acq.isFlagSet(ismrmrd.ACQ_IS_NOISE_MEASUREMENT): print("Found noise scan at acq ", acqnum) continue @@ -114,34 +113,36 @@ print("Imaging acquisition starts acq ", acqnum) break -#Calculate prewhiterner taking BWs into consideration +# Calculate prewhiterner taking BWs into consideration a = dset.read_acquisition(firstacq) data_dwell_time = a.sample_time_us noise_receiver_bw_ratio = 0.79 -dmtx = coils.calculate_prewhitening(noise,scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio) +dmtx = coils.calculate_prewhitening( + noise, scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio) + - #%% # Process the actual data -all_data = np.zeros((nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64) +all_data = np.zeros( + (nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64) # Loop through the rest of the acquisitions and stuff -for acqnum in range(firstacq,dset.number_of_acquisitions()): +for acqnum in range(firstacq, dset.number_of_acquisitions()): acq = dset.read_acquisition(acqnum) - acq_data_prw = coils.apply_prewhitening(acq.data,dmtx) + acq_data_prw = coils.apply_prewhitening(acq.data, dmtx) # Remove oversampling if needed if eNx != rNx: xline = transform.transform_kspace_to_image(acq_data_prw, [1]) x0 = (eNx - rNx) / 2 x1 = (eNx - rNx) / 2 + rNx - xline = xline[:,x0:x1] - acq.resize(rNx,acq.active_channels,acq.trajectory_dimensions) + xline = xline[:, x0:x1] + acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions) acq.center_sample = rNx/2 # need to use the [:] notation here to fill the data acq.data[:] = transform.transform_image_to_kspace(xline, [1]) - + # Stuff into the buffer rep = acq.idx.repetition contrast = acq.idx.contrast @@ -154,25 +155,27 @@ #%% # Coil combination -coil_images = transform.transform_kspace_to_image(np.squeeze(np.mean(all_data,0)),(1,2)) -(csm,rho) = coils.calculate_csm_walsh(coil_images) -csm_ss = np.sum(csm * np.conj(csm),0) +coil_images = transform.transform_kspace_to_image( + np.squeeze(np.mean(all_data, 0)), (1, 2)) +(csm, rho) = coils.calculate_csm_walsh(coil_images) +csm_ss = np.sum(csm * np.conj(csm), 0) csm_ss = csm_ss + 1.0*(csm_ss < np.spacing(1)).astype('float32') if acc_factor > 1: - coil_data = np.squeeze(np.mean(all_data,0)) + coil_data = np.squeeze(np.mean(all_data, 0)) reload(grappa) - (unmix,gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor) + (unmix, gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor) #(unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm) - show.imshow(abs(gmap),colorbar=True,scale=(1,2)) - -recon = np.zeros((nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.complex64) -for r in range(0,nreps): - recon_data = transform.transform_kspace_to_image(np.squeeze(all_data[r,:,:,:,:,:,:]),(1,2))*np.sqrt(acc_factor) + show.imshow(abs(gmap), colorbar=True, scale=(1, 2)) + +recon = np.zeros( + (nreps, ncontrasts, nslices, eNz, eNy, rNx), dtype=np.complex64) +for r in range(0, nreps): + recon_data = transform.transform_kspace_to_image( + np.squeeze(all_data[r, :, :, :, :, :, :]), (1, 2))*np.sqrt(acc_factor) if acc_factor > 1: - recon[r,:,:,:,:] = np.sum(unmix * recon_data,0) + recon[r, :, :, :, :] = np.sum(unmix * recon_data, 0) else: - recon[r,:,:,:,:] = np.sum(np.conj(csm) * recon_data,0) - -show.imshow(np.squeeze(np.std(np.abs(recon),0)),colorbar=True,scale=(1,2)) + recon[r, :, :, :, :] = np.sum(np.conj(csm) * 
recon_data, 0) +show.imshow(np.squeeze(np.std(np.abs(recon), 0)), colorbar=True, scale=(1, 2)) diff --git a/setup.py b/setup.py index 7a91ca1..fb2f878 100644 --- a/setup.py +++ b/setup.py @@ -2,15 +2,15 @@ try: from sphinx.setup_command import BuildDoc - cmdclass={'build_sphinx':BuildDoc} + cmdclass = {'build_sphinx': BuildDoc} except ImportError: - cmdclass={} + cmdclass = {} setup(name='ismrmrd-python-tools', - version='0.1', - description='ISMRMRD Image Reconstruction Tools', - author='ISMRMRD Developers', - url='https://github.com/ismrmrd/ismrmrd-python-tools', - packages=['ismrmrdtools'], - cmdclass=cmdclass - ) + version='0.1', + description='ISMRMRD Image Reconstruction Tools', + author='ISMRMRD Developers', + url='https://github.com/ismrmrd/ismrmrd-python-tools', + packages=['ismrmrdtools'], + cmdclass=cmdclass + ) From 5059490f4f2ad445457c7dfc99c7b7783a63c990 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Mon, 12 Oct 2015 12:29:11 -0500 Subject: [PATCH 02/15] MAINT: raise explicit exceptions rather than relying on assert statements --- ismrmrdtools/coils.py | 3 ++- ismrmrdtools/grappa.py | 9 ++++++--- ismrmrdtools/sense.py | 7 ++++--- ismrmrdtools/show.py | 18 ++++++++++-------- ismrmrdtools/simulation.py | 15 ++++++++------- 5 files changed, 30 insertions(+), 22 deletions(-) diff --git a/ismrmrdtools/coils.py b/ismrmrdtools/coils.py index 515d938..dc49153 100644 --- a/ismrmrdtools/coils.py +++ b/ismrmrdtools/coils.py @@ -51,7 +51,8 @@ def calculate_csm_walsh(img, smoothing=5, niter=3): :returns csm: Relative coil sensitivity maps, ``[coil, y, x]`` :returns rho: Total power in the estimated coils maps, ``[y, x]`` ''' - assert img.ndim == 3, "Coil sensitivity map must have exactly 3 dimensions" + if img.ndim != 3: + raise ValueError("Coil sensitivity map must have exactly 3 dimensions") ncoils = img.shape[0] ny = img.shape[1] diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index e20ac90..46ddbb3 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -113,9 +113,12 @@ def estimate_convolution_kernel(source_data, kernel_mask, if target_data is None: target_data = source_data - assert source_data.ndim == 3, "Source data must have exactly 3 dimensions" - assert target_data.ndim == 3, "Targe data must have exactly 3 dimensions" - assert kernel_mask.ndim == 2, "Kernel mask must have exactly 2 dimensions" + if source_data.ndim != 3: + raise ValueError("Source data must have exactly 3 dimensions") + if target_data.ndim != 3: + raise ValueError("Targe data must have exactly 3 dimensions") + if kernel_mask.ndim != 2: + raise ValueError("Kernel mask must have exactly 2 dimensions") nc_source = source_data.shape[0] nc_target = target_data.shape[0] diff --git a/ismrmrdtools/sense.py b/ismrmrdtools/sense.py index a29773a..d677ec8 100644 --- a/ismrmrdtools/sense.py +++ b/ismrmrdtools/sense.py @@ -19,7 +19,8 @@ def calculate_sense_unmixing(acc_factor, csm, regularization_factor=0.001): :returns gmap: Noise enhancement map, ``[y, x]`` ''' - assert csm.ndim == 3, "Coil sensitivity map must have exactly 3 dimensions" + if csm.ndim != 3: + raise ValueError("Coil sensitivity map must have exactly 3 dimensions") unmix = np.zeros(csm.shape, np.complex64) @@ -37,8 +38,8 @@ def _calculate_sense_unmixing_1d(acc_factor, csm1d, regularization_factor): nc = csm1d.shape[0] ny = csm1d.shape[1] - assert ( - ny % acc_factor) == 0, "ny must be a multiple of the acceleration factor" + if (ny % acc_factor) != 0: + raise ValueError("ny must be a multiple of the acceleration factor") 
unmix1d = np.zeros((nc, ny), dtype=np.complex64) diff --git a/ismrmrdtools/show.py b/ismrmrdtools/show.py index bd5b709..e6be9c7 100644 --- a/ismrmrdtools/show.py +++ b/ismrmrdtools/show.py @@ -7,7 +7,8 @@ from matplotlib.widgets import RectangleSelector -def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, cmap='jet'): +def imshow(image_matrix, tile_shape=None, scale=None, titles=[], + colorbar=False, cmap='jet'): """ Tiles images and displays them in a window. :param image_matrix: a 2D or 3D set of image data @@ -16,8 +17,8 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, :param titles: optional list of titles for each subplot :param cmap: optional colormap for all images """ - assert image_matrix.ndim in [ - 2, 3], "image_matrix must have 2 or 3 dimensions" + if image_matrix.ndim not in [2, 3]: + raise ValueError("image_matrix must have 2 or 3 dimensions") if image_matrix.ndim == 2: image_matrix = image_matrix.reshape( @@ -29,16 +30,17 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, if not tile_shape: tile_shape = (1, image_matrix.shape[0]) - assert np.prod(tile_shape) >= image_matrix.shape[0],\ - "image tile rows x columns must equal the 3rd dim extent of image_matrix" + if np.prod(tile_shape) < image_matrix.shape[0]: + raise ValueError("image tile rows x columns must equal the 3rd dim " + "extent of image_matrix") # add empty titles as necessary if len(titles) < image_matrix.shape[0]: titles.extend(['' for x in range(image_matrix.shape[0] - len(titles))]) - if len(titles) > 0: - assert len(titles) >= image_matrix.shape[0],\ - "number of titles must equal 3rd dim extent of image_matrix" + if (len(titles) > 0) and (len(titles) < image_matrix.shape[0]): + raise ValueError("number of titles must equal 3rd dim extent of " + "image_matrix") def onselect(eclick, erelease): print((eclick.xdata, eclick.ydata), (erelease.xdata, erelease.ydata)) diff --git a/ismrmrdtools/simulation.py b/ismrmrdtools/simulation.py index 08c1630..158efef 100644 --- a/ismrmrdtools/simulation.py +++ b/ismrmrdtools/simulation.py @@ -34,12 +34,13 @@ def sample_data(img_obj, csm, acc=1, ref=0, sshift=0): sshift = sshift % acc - assert img_obj.ndim == 2, "Only two dimensional objects supported at the moment" - assert csm.ndim == 3, "csm must be a 3 dimensional array" - assert img_obj.shape[0] == csm.shape[ - 1], "Object and csm dimension mismatch" - assert img_obj.shape[1] == csm.shape[ - 2], "Object and csm dimension mismatch" + if img_obj.ndim != 2: + raise ValueError("Only two dimensional objects supported at the " + "moment") + if csm.ndim != 3: + raise ValueError("csm must be a 3 dimensional array") + if img_obj.shape[0:2] != csm.shape[1:3]: + raise ValueError("Object and csm dimension mismatch") pat_img = np.zeros(img_obj.shape, dtype=np.int8) pat_img[sshift:-1:acc, :] = 1 @@ -141,7 +142,7 @@ def phantom(matrix_size=256, phantom_type='Modified Shepp-Logan', if (ellipses is None): ellipses = _select_phantom(phantom_type) elif (np.size(ellipses, 1) != 6): - raise AssertionError("Wrong number of columns in user phantom") + raise ValueError("Wrong number of columns in user phantom") ph = np.zeros((matrix_size, matrix_size), dtype=np.float32) From ddfe4b545a2fe277383c19cc330e779c28de374d Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Mon, 12 Oct 2015 23:04:23 -0500 Subject: [PATCH 03/15] STY: convert docstrings to numpy format --- ismrmrdtools/coils.py | 112 ++++++++++++++--------- ismrmrdtools/grappa.py | 81 +++++++++++------ ismrmrdtools/ndarray_io.py | 50 ++++++----- ismrmrdtools/sense.py | 30 ++++--- ismrmrdtools/show.py | 19 ++-- ismrmrdtools/simulation.py | 176 +++++++++++++++++++++---------------- ismrmrdtools/transform.py | 50 +++++++---- 7 files changed, 323 insertions(+), 195 deletions(-) diff --git a/ismrmrdtools/coils.py b/ismrmrdtools/coils.py index dc49153..718fa54 100644 --- a/ismrmrdtools/coils.py +++ b/ismrmrdtools/coils.py @@ -1,22 +1,31 @@ -# -*- coding: utf-8 -*- """ -Utilities for coil sensivity maps, pre-whitening, etc +Utilities for coil sensivity maps, pre-whitening, etc. """ import numpy as np from scipy import ndimage def calculate_prewhitening(noise, scale_factor=1.0): - '''Calculates the noise prewhitening matrix + """Calculate the noise prewhitening matrix. - :param noise: Input noise data (array or matrix), ``[coil, nsamples]`` - :scale_factor: Applied on the noise covariance matrix. Used to - adjust for effective noise bandwith and difference in - sampling rate between noise calibration and actual measurement: - scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio + Parameters + ---------- + noise : (coil, nsamples) array_like + Input noise data. + scale_factor: float, optional + Applied on the noise covariance matrix. Used to adjust for effective + noise bandwith and difference in sampling rate between noise + calibration and actual measurement. + + Returns + ------- + w : (coil, coil) array + Prewhitening matrix (w*data is prewhitened). - :returns w: Prewhitening matrix, ``[coil, coil]``, w*data is prewhitened - ''' + Notes + ----- + ``scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio`` + """ noise_int = noise.reshape((noise.shape[0], noise.size/noise.shape[0])) M = float(noise_int.shape[1]) dmtx = (1/(M-1))*np.asmatrix(noise_int)*np.asmatrix(noise_int).H @@ -26,13 +35,20 @@ def calculate_prewhitening(noise, scale_factor=1.0): def apply_prewhitening(data, dmtx): - '''Apply the noise prewhitening matrix + """Apply the noise prewhitening matrix - :param noise: Input noise data (array or matrix), ``[coil, ...]`` - :param dmtx: Input noise prewhitening matrix + Parameters + ---------- + noise : (coil, ...) array_like + Input noise data. + dmtx : array_like + Input noise prewhitening matrix. - :returns w_data: Prewhitened data, ``[coil, ...]``, - ''' + Returns + ------- + w_data : (coil, ...) array + Prewhitened data. + """ s = data.shape return np.asarray( @@ -41,16 +57,25 @@ def apply_prewhitening(data, dmtx): def calculate_csm_walsh(img, smoothing=5, niter=3): - '''Calculates the coil sensitivities for 2D data using an iterative version of the Walsh method + """ Calculates the coil sensitivities for 2D data using an iterative + version of the Walsh method. - :param img: Input images, ``[coil, y, x]`` - :param smoothing: Smoothing -block size (default ``5``) - :parma niter: Number of iterations for the eigenvector power method (default ``3``) + Parameters + ---------- + img : (coil, y, x) array + Input images. + smoothing : int or array_like, optional + Smoothing kernel block size. + niter : int, optional + Number of iterations for the eigenvector power method. 
- :returns csm: Relative coil sensitivity maps, ``[coil, y, x]`` - :returns rho: Total power in the estimated coils maps, ``[y, x]`` - ''' + Returns + ------- + csm : (coil, y, x) array + Relative coil sensitivity maps. + rho : (y, x) array + Total power in the estimated coils maps. + """ if img.ndim != 3: raise ValueError("Coil sensitivity map must have exactly 3 dimensions") @@ -97,30 +122,30 @@ def calculate_csm_inati_iter(im, smoothing=5, niter=5, thresh=1e-3, Parameters ---------- - im : ndarray - Input images, [coil, y, x] or [coil, z, y, x]. - smoothing : int or ndarray-like + im : (coil, ...) ndarray + Input images, (coil, y, x) or (coil, z, y, x). + smoothing : int or array-like, optional Smoothing block size(s) for the spatial axes. - niter : int + niter : int, optional Maximal number of iterations to run. - thresh : float + thresh : float, optional Threshold on the relative coil map change required for early termination of iterations. If ``thresh=0``, the threshold check - will be skipped and all ``niter`` iterations will be performed. - verbose : bool + will be skipped and all `niter` iterations will be performed. + verbose : bool, optional If true, progress information will be printed out at each iteration. Returns ------- - coil_map : ndarray - Relative coil sensitivity maps, [coil, y, x] or [coil, z, y, x]. - coil_combined : ndarray - The coil combined image volume, [y, x] or [z, y, x]. + coil_map : (coil, ...) array + Relative coil sensitivity maps, (coil, y, x) or (coil, z, y, x). + coil_combined : array + The coil combined image volume, (y, x) or (z, y, x). Notes ----- The implementation corresponds to the algorithm described in [1]_ and is a - port of Gadgetron's ``coil_map_3d_Inati_Iter`` routine. + port of Gadgetron's `coil_map_3d_Inati_Iter` routine. For non-isotropic voxels it may be desirable to use non-uniform smoothing kernel sizes, so a length 3 array of smoothings is also supported. @@ -225,13 +250,20 @@ def calculate_csm_inati_iter(im, smoothing=5, niter=5, thresh=1e-3, def smooth(img, box=5): - '''Smooths coil images + """Smooth the coil images with a uniform filter. - :param img: Input complex images, ``[y, x] or [z, y, x]`` - :param box: Smoothing block size (default ``5``) + Parameters + ---------- + img : array + Input complex images, (y, x) or (z, y, x). + box : int or array-like, optional + Smoothing block size. - :returns simg: Smoothed complex image ``[y,x] or [z,y,x]`` - ''' + Returns + ------- + simg : array + Smoothed complex image, (y, x) or (z, y, x). + """ t_real = np.zeros(img.shape) t_imag = np.zeros(img.shape) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 46ddbb3..a4d9afd 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -6,21 +6,37 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), data_mask=None, csm=None, regularization_factor=0.001, target_data=None): - '''Calculates unmixing coefficients for a 2D image using a GRAPPA algorithm - - :param source_data: k-space source data ``[coils, y, x]`` - :param acc_factor: Acceleration factor, e.g. 2 - :param kernel_shape: Shape of the k-space kernel ``(ky-lines, kx-points)`` (default ``(4,5)``) - :param data_mask: Mask of where calibration data is located in source_data (defaults to all of source_data) - :param csm: Coil sensitivity map, ``[coil, y, x]`` (used for b1-weighted combining. 
Will be estimated from calibratino data if not supplied) - :param regularization_factor: adds tychonov regularization (default ``0.001``) - - 0 = no regularization - - set higher for more aggressive regularization. - :param target_data: If target data differs from source data (defaults to source_data) - - :returns unmix: Image unmixing coefficients for a single ``x`` location, ``[coil, y, x]`` - :returns gmap: Noise enhancement map, ``[y, x]`` - ''' + """Calculates unmixing coefficients for a 2D image using a GRAPPA algorithm + + Paramters + --------- + source_data : (coils, y, x) array + k-space source data. + acc_factor : int + Acceleration factor, e.g. 2 + kernel_shape : tuple, optional + Shape of the k-space kernel (ky-lines, kx-points). + data_mask : (y, x) array or None, optional + Mask of where calibration data is located in source_data. defaults to + all of the source data. + csm : (coil, y, x) array or None, optional + Coil sensitivity map. (used for b1-weighted combining. Will be + estimated from calibratino data if not supplied.) + regularization_factor : float, optional + Tikhonov regularization weight. + - 0 = no regularization + - set higher for more aggressive regularization. + target_data : (coil, y, x) array or None, optional + If target data differs from source data. (defaults to source_data) + + + Returns + ------- + unmix : (coil, y, x) array + Image unmixing coefficients for a single ``x`` location. + gmap : (y, x) array + Noise enhancement map. + """ nx = source_data.shape[2] ny = source_data.shape[1] @@ -96,19 +112,28 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), def estimate_convolution_kernel(source_data, kernel_mask, regularization_factor=0.001, target_data=None): - '''Estimates a 2D k-space convolution kernel (as used in GRAPPA or SPIRiT) - - :param source_data: k-space source data ``[coils, y, x]`` - :param kernel_mask: Mask indicating which k-space samples to use in the neighborhood. ``[ky kx]`` - :param csm: Coil sensitivity map, ``[coil, y, x]`` (used for b1-weighted combining. Will be estimated from calibratino data if not supplied) - :param regularization_factor: adds tychonov regularization (default ``0.001``) - - 0 = no regularization - - set higher for more aggressive regularization. - :param target_data: If target data differs from source data (defaults to source_data) - - :returns unmix: Image unmixing coefficients for a single ``x`` location, ``[coil, y, x]`` - :returns gmap: Noise enhancement map, ``[y, x]`` - ''' + """Estimates a 2D k-space convolution kernel (as used in GRAPPA or SPIRiT). + + Paramters + --------- + source_data : (coil, y, x) array + k-space source data. + kernel_mask : (ky, kx) array + Mask indicating which k-space samples to use in the neighborhood. + regularization_factor : float, optional + Tikhonov regularization weight + - 0 = no regularization + - set higher for more aggressive regularization. + target_data : (coil, y, x) array or None, optional + If target data differs from source data (defaults to source_data) + + Returns + ------- + unmix : (coil, y, x) array + Image unmixing coefficients for a single ``x`` location. + gmap : (y, x) array + Noise enhancement map. + """ if target_data is None: target_data = source_data diff --git a/ismrmrdtools/ndarray_io.py b/ismrmrdtools/ndarray_io.py index 5763a70..e3e9342 100644 --- a/ismrmrdtools/ndarray_io.py +++ b/ismrmrdtools/ndarray_io.py @@ -3,21 +3,25 @@ def write_ndarray(filename, ndarray): - '''Writes simple ndarray format. 
This format is mostly used for debugging purposes + """Writes a simple ndarray format. This format is mostly used for debugging + purposes. + + Paramters + --------- + filename : str + Name of file containing array (extension appended automatically). + ndarray : array + The array to write out to `filename`. + + Notes + ----- The file name extension indicates the binary data format: '*.float' indicates float32 - '*.double' indicates float64 - '*.cplx' indicates complex64 - '*.dplx' indicatex complex128 - - :param filename: Name of file containing array (extension appended automatically) - :param ndarray: Numpy array - ''' - + """ datatype = ndarray.dtype if datatype == np.dtype(np.float32): fullfilename = filename + str('.float') @@ -28,7 +32,7 @@ def write_ndarray(filename, ndarray): elif datatype == np.dtype(np.complex128): fullfilename = filename + str('.dplx') else: - raise Exception('Unsupported data type') + raise ValueError('Unsupported data type') f = open(fullfilename, 'wb') dims = np.zeros((ndarray.ndim+1, 1), dtype=np.int32) @@ -40,22 +44,28 @@ def write_ndarray(filename, ndarray): def read_ndarray(filename): - '''Reads simple ndarray format. This format is mostly used for debugging purposes + """Read a simple ndarray format. This format is mostly used for debugging + purposes. + + Paramters + --------- + filename : str + Name of file containing array. + + Returns + ------- + arr: array + Numpy ndarray. + + Notes + ----- The file name extension indicates the binary data format: '*.float' indicates float32 - '*.double' indicates float64 - '*.cplx' indicates complex64 - '*.dplx' indicatex complex128 - - :param filename: Name of file containing array - :returns arr: Numpy ndarray - ''' - - datatype = None + """ if filename.endswith('.float'): datatype = np.dtype(np.float32) elif filename.endswith('.double'): diff --git a/ismrmrdtools/sense.py b/ismrmrdtools/sense.py index d677ec8..778c87e 100644 --- a/ismrmrdtools/sense.py +++ b/ismrmrdtools/sense.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Calculate SENSE unmixing coefficients """ @@ -6,18 +5,27 @@ def calculate_sense_unmixing(acc_factor, csm, regularization_factor=0.001): - '''Calculates the unmixing coefficients for a 2D image using a SENSE algorithm + """Calculate the unmixing coefficients for a 2D image using a + SENSE algorithm. - :param acc_factor: Acceleration factor, e.g. 2 - :param csm: Coil sensitivity map, ``[coil, y, x]`` - :param regularization_factor: adds tychonov regularization (default ``0.001``) + Paramters + --------- + acc_factor : int + Acceleration factor, e.g. 2 + csm : (coil, y, x) array + Coil sensitivity map. + regularization_factor : float, optional + Tikhonov regularization weight. + - 0 = no regularization + - set higher for more aggressive regularization. - - 0 = no regularization - - set higher for more aggressive regularization. - - :returns unmix: Image unmixing coefficients for a single ``x`` location, ``[coil, y, x]`` - :returns gmap: Noise enhancement map, ``[y, x]`` - ''' + Returns + ------- + unmix : (coil, y, x) array + Image unmixing coefficients for a single ``x`` location. + gmap : (y, x) array + Noise enhancement map. 
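A condensed usage sketch of calculate_sense_unmixing as documented above, following the same steps as parallel_imaging_demo.py; the matrix size and acceleration factor are arbitrary:

    import numpy as np
    from ismrmrdtools import simulation, transform, sense

    acc_factor = 2
    csm = simulation.generate_birdcage_sensitivities(128)      # [coil, y, x]
    phan = simulation.phantom(128)
    data, pat = simulation.sample_data(phan, csm, acc_factor)  # undersampled k-space

    alias_img = transform.transform_kspace_to_image(data, dim=(1, 2)) * np.sqrt(acc_factor)
    unmix, gmap = sense.calculate_sense_unmixing(acc_factor, csm)
    recon = np.squeeze(np.sum(alias_img * unmix, 0))           # unaliased [y, x] image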
+ """ if csm.ndim != 3: raise ValueError("Coil sensitivity map must have exactly 3 dimensions") diff --git a/ismrmrdtools/show.py b/ismrmrdtools/show.py index e6be9c7..b3327a1 100644 --- a/ismrmrdtools/show.py +++ b/ismrmrdtools/show.py @@ -9,13 +9,20 @@ def imshow(image_matrix, tile_shape=None, scale=None, titles=[], colorbar=False, cmap='jet'): - """ Tiles images and displays them in a window. + """ Tile images and display them in a window. - :param image_matrix: a 2D or 3D set of image data - :param tile_shape: optional shape ``(rows, cols)`` for tiling images - :param scale: optional ``(min,max)`` values for scaling all images - :param titles: optional list of titles for each subplot - :param cmap: optional colormap for all images + Paramters + --------- + image_matrix : array + a 2D or 3D set of image data + tile_shape : array or None, optional + optional shape `(rows, cols)` for tiling images + scale : tuple or None, optional + optional `(min, max)` values for scaling all images + titles : list or None, optional + optional list of titles for each subplot + cmap : str or `matplotlib.colors.Colormap`, optional + optional colormap for all images """ if image_matrix.ndim not in [2, 3]: raise ValueError("image_matrix must have 2 or 3 dimensions") diff --git a/ismrmrdtools/simulation.py b/ismrmrdtools/simulation.py index 158efef..a1251b7 100644 --- a/ismrmrdtools/simulation.py +++ b/ismrmrdtools/simulation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Tools for generating coil sensitivities and phantoms """ @@ -7,29 +6,38 @@ def sample_data(img_obj, csm, acc=1, ref=0, sshift=0): - """ - Samples the k-space of object provided in 'img_obj' after first applying - coil sensitivity maps in 'csm' and Fourier transforming to k-space. - - INPUT: - - img_obj [x,y] : Object in image space - - csm [x,y,c] : Coil sensitivity maps - - acc scalar : Acceleration factor - - ref scalar : Reference lines (in center of k-space) - - sshift scalar : Sampling shift, i.e for undersampling, do we - start with line 1 or line 1+sshift? - - OUPUT: - - data [kx,ky,c]: Sampled data in k-space (zeros where not sampled) - - pat [kx,ky,c]: Sampling pattern (0 = not sampled, - 1 = imaging data, - 2 = reference data, - 3 = reference and imaging data) - - - Code made available for the ISMRM 2013 Sunrise Educational Course - - Michael S. Hansen (michael.hansen@nih.gov) + """Sample the k-space of object provided in `img_obj` after first applying + coil sensitivity maps in `csm` and Fourier transforming to k-space. + + Paramters + --------- + img_obj : (y, x) array + Object in image space + csm : (c, y, x) array + Coil sensitivity maps + acc : float, optional + Acceleration factor + ref : float, optional + Reference lines (in center of k-space) + sshift : float, optional + Sampling shift, i.e for undersampling, do we start with line 1 or line + 1+sshift?. + + Returns + ------- + data : (c, ky, kx) array + Sampled data in k-space (zeros where not sampled). + pat : (c, ky, kx) array + Sampling pattern : (0 = not sampled, + 1 = imaging data, + 2 = reference data, + 3 = reference and imaging data) + + Notes + ----- + Code made available for the ISMRM 2013 Sunrise Educational Course + + Michael S. Hansen (michael.hansen@nih.gov) """ sshift = sshift % acc @@ -58,11 +66,24 @@ def sample_data(img_obj, csm, acc=1, ref=0, sshift=0): def generate_birdcage_sensitivities(matrix_size=256, number_of_coils=8, relative_radius=1.5, normalize=True): - """ Generates birdcage coil sensitivites. + """ Generate birdcage coil sensitivites. 
+ + + Parameters + ---------- + matrix_size : int, optional + size of imaging matrix in pixels. + number_of_coils : int, optional + Number of simulated coils. + relative_radius : int, optional + Relative radius of birdcage. + normalize : bool, optional + If True, normalize by the root sum-of-squares intensity. - :param matrix_size: size of imaging matrix in pixels (default ``256``) - :param number_of_coils: Number of simulated coils (default ``8``) - :param relative_radius: Relative radius of birdcage (default ``1.5``) + Returns + ------- + out : array + coil sensitivies (number_of_coils, matrix_size, matrix_size) This function is heavily inspired by the mri_birdcage.m Matlab script in Jeff Fessler's IRT package: http://web.eecs.umich.edu/~fessler/code/ @@ -93,50 +114,57 @@ def generate_birdcage_sensitivities(matrix_size=256, number_of_coils=8, def phantom(matrix_size=256, phantom_type='Modified Shepp-Logan', ellipses=None): """ - Create a Shepp-Logan or modified Shepp-Logan phantom:: - - phantom (n = 256, phantom_type = 'Modified Shepp-Logan', - ellipses = None) - - :param matrix_size: size of imaging matrix in pixels (default 256) - - :param phantom_type: The type of phantom to produce. - Either "Modified Shepp-Logan" or "Shepp-Logan". This is overridden - if ``ellipses`` is also specified. - - :param ellipses: Custom set of ellipses to use. These should be in - the form:: - - [[I, a, b, x0, y0, phi], - [I, a, b, x0, y0, phi], - ...] - - where each row defines an ellipse. - - :I: Additive intensity of the ellipse. - :a: Length of the major axis. - :b: Length of the minor axis. - :x0: Horizontal offset of the centre of the ellipse. - :y0: Vertical offset of the centre of the ellipse. - :phi: Counterclockwise rotation of the ellipse in degrees, - measured as the angle between the horizontal axis and - the ellipse major axis. - - The image bounding box in the algorithm is ``[-1, -1], [1, 1]``, - so the values of ``a``, ``b``, ``x0``, ``y0`` should all be specified with - respect to this box. - - :returns: Phantom image - - References: - - Shepp, L. A.; Logan, B. F.; Reconstructing Interior Head Tissue - from X-Ray Transmissions, IEEE Transactions on Nuclear Science, - Feb. 1974, p. 232. - - Toft, P.; "The Radon Transform - Theory and Implementation", - Ph.D. thesis, Department of Mathematical Modelling, Technical - University of Denmark, June 1996. + Create a Shepp-Logan [1]_ or modified Shepp-Logan [2]_ phantom. + + Parameters + ---------- + matrix_size : int, optional + size of imaging matrix in pixels. + + phantom_type : {'Modified Shepp-Logan', 'Shepp-Logan'}, optional + The type of phantom to produce. This is overridden if `ellipses` is + also specified. + + ellipses : list or None, optional + Custom set of ellipses to use. See notes below for details. + + Returns + ------- + ph : array + Phantom image. + + Notes + ----- + The `ellipses` should be in the form:: + + [[I, a, b, x0, y0, phi], + [I, a, b, x0, y0, phi], + ...] + where each row defines an ellipse. + + I: Additive intensity of the ellipse. + a: Length of the major axis. + b: Length of the minor axis. + x0: Horizontal offset of the centre of the ellipse. + y0: Vertical offset of the centre of the ellipse. + phi: Counterclockwise rotation of the ellipse in degrees, + measured as the angle between the horizontal axis and + the ellipse major axis. + + The image bounding box in the algorithm is `[-1, -1], [1, 1]`, + so the values of `a`, `b`, `x0`, `y0` should all be specified + with respect to this box. 
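A short sketch of the custom-ellipse interface described above; the ellipse values are made up but follow the documented [I, a, b, x0, y0, phi] convention and the [-1, 1] bounding box:

    from ismrmrdtools import simulation

    # default modified Shepp-Logan phantom
    ph = simulation.phantom(matrix_size=256)

    # custom phantom: a bright disc plus a dimmer ellipse rotated by 30 degrees
    ellipses = [[1.0, 0.6, 0.6, 0.0, 0.0, 0.0],
                [0.4, 0.3, 0.15, 0.2, -0.2, 30.0]]
    ph_custom = simulation.phantom(matrix_size=256, ellipses=ellipses)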
+ + References + ---------- + + .. [1] Shepp, L. A.; Logan, B. F.; Reconstructing Interior Head Tissue + from X-Ray Transmissions, IEEE Transactions on Nuclear Science, + Feb. 1974, p. 232. + + .. [2] Toft, P.; "The Radon Transform - Theory and Implementation", + Ph.D. thesis, Department of Mathematical Modelling, Technical + University of Denmark, June 1996. """ if (ellipses is None): @@ -185,7 +213,7 @@ def _select_phantom(name): def _shepp_logan (): - """ Standard head phantom, taken from Shepp & Logan. """ + """Standard head phantom, taken from Shepp & Logan.""" return [[ 2, .69, .92, 0, 0, 0], [-.98, .6624, .8740, 0, -.0184, 0], [-.02, .1100, .3100, .22, 0, -18], @@ -199,7 +227,7 @@ def _shepp_logan (): def _mod_shepp_logan (): - """ Modified version of Shepp & Logan's head phantom, adjusted to improve + """Modified version of Shepp & Logan's head phantom, adjusted to improve contrast. Taken from Toft. """ return [[ 1, .69, .92, 0, 0, 0], diff --git a/ismrmrdtools/transform.py b/ismrmrdtools/transform.py index 611d819..98393d0 100644 --- a/ismrmrdtools/transform.py +++ b/ismrmrdtools/transform.py @@ -6,15 +6,24 @@ def transform_kspace_to_image(k, dim=None, img_shape=None): - """ Computes the Fourier transform from k-space to image space - along a given or all dimensions - - :param k: k-space data - :param dim: vector of dimensions to transform - :param img_shape: desired shape of output image - :returns: data in image space (along transformed dimensions) + """ Compute the Fourier transform from k-space to image space + along a given or all dimensions. + + Paramters + --------- + k : array + k-space data + dim : tuple, optional + vector of dimensions to transform + img_shape : tuple, optional + desired shape of output image + + Returns + ------- + img : array + data in image space (along transformed dimensions) """ - if not dim: + if dim is None: dim = range(k.ndim) img = fftshift( @@ -24,15 +33,24 @@ def transform_kspace_to_image(k, dim=None, img_shape=None): def transform_image_to_kspace(img, dim=None, k_shape=None): - """ Computes the Fourier transform from image space to k-space space - along a given or all dimensions - - :param img: image space data - :param dim: vector of dimensions to transform - :param k_shape: desired shape of output k-space data - :returns: data in k-space (along transformed dimensions) + """Compute the Fourier transform from image space to k-space space + along a given or all dimensions. + + Paramters + --------- + img : array + image space data + dim : tuple, optional + vector of dimensions to transform + k_shape : tuple, optional + desired shape of output k-space data + + Returns + ------- + k : array + data in k-space (along transformed dimensions) """ - if not dim: + if dim is None: dim = range(img.ndim) k = fftshift(fftn(ifftshift(img, axes=dim), s=k_shape, axes=dim), axes=dim) From a76108dedce6eca8f6603a5eaf3ec0863ab4f767 Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Mon, 12 Oct 2015 23:27:17 -0500 Subject: [PATCH 04/15] MAINT: remove calls to reload modules --- parallel_imaging_demo.py | 26 -------------------------- recon_multi_reps.py | 1 - 2 files changed, 27 deletions(-) diff --git a/parallel_imaging_demo.py b/parallel_imaging_demo.py index 80858b8..c4e8a11 100644 --- a/parallel_imaging_demo.py +++ b/parallel_imaging_demo.py @@ -1,12 +1,10 @@ # -*- coding: utf-8 -*- -#%% # Basic setup import numpy as np import scipy as sp from ismrmrdtools import sense, grappa, show, simulation, transform, coils -#%% # import some data exercise_data = sp.io.loadmat('hansen_exercises2.mat') csm = np.transpose(exercise_data['smaps']) @@ -31,31 +29,17 @@ 'alias_img_py': alias_img, 'unmix_grappa_py': unmix_grappa}) -#%% -# Reload some modules -reload(show) -reload(sense) -reload(grappa) -reload(simulation) -reload(transform) -reload(coils) - -#%% -reload(simulation) matrix_size = 256 csm = simulation.generate_birdcage_sensitivities(matrix_size) phan = simulation.phantom(matrix_size) coil_images = np.tile(phan, (8, 1, 1)) * csm show.imshow(abs(coil_images), tile_shape=(4, 2), colorbar=True) -#%% # Undersample -reload(simulation) acc_factor = 2 ref_lines = 16 (data, pat) = simulation.sample_data(phan, csm, acc_factor, ref_lines) -#%% # Add noise noise = np.random.standard_normal( data.shape) + 1j*np.random.standard_normal(data.shape) @@ -63,32 +47,23 @@ kspace = np.logical_or(pat == 1, pat == 3).astype('float32')*(data + noise) data = (pat > 0).astype('float32')*(data + noise) -#%% # Calculate the noise prewhitening matrix dmtx = coils.calculate_prewhitening(noise) -#%% # Apply prewhitening kspace = coils.apply_prewhitening(kspace, dmtx) data = coils.apply_prewhitening(data, dmtx) - -#%% # Reconstruct aliased images alias_img = transform.transform_kspace_to_image( kspace, dim=(1, 2)) * np.sqrt(acc_factor) show.imshow(abs(alias_img)) - -#%% -reload(sense) (unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor, csm) show.imshow(abs(gmap_sense), colorbar=True) recon_sense = np.squeeze(np.sum(alias_img * unmix_sense, 0)) show.imshow(abs(recon_sense), colorbar=True) -#%% -reload(grappa) # (unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing( # data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(2,5)) (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( @@ -97,7 +72,6 @@ recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) show.imshow(abs(recon_grappa), colorbar=True) -#%% # Pseudo replica example reps = 255 reps_sense = np.zeros((reps, ) + recon_grappa.shape[:2], dtype=np.complex64) diff --git a/recon_multi_reps.py b/recon_multi_reps.py index 5c032ed..4b96477 100644 --- a/recon_multi_reps.py +++ b/recon_multi_reps.py @@ -163,7 +163,6 @@ if acc_factor > 1: coil_data = np.squeeze(np.mean(all_data, 0)) - reload(grappa) (unmix, gmap) = grappa.calculate_grappa_unmixing(coil_data, acc_factor) #(unmix,gmap) = sense.calculate_sense_unmixing(acc_factor,csm) show.imshow(abs(gmap), colorbar=True, scale=(1, 2)) From 6d9c3aaabd78ef935356365b744175f7a8bbea38 Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Mon, 12 Oct 2015 23:42:38 -0500 Subject: [PATCH 05/15] MAINT: misc additional cleanups and python 3 compatibility tweaks --- csm_estimation_demo.py | 2 +- generate_cartesian_shepp_logan_dataset.py | 1 + ismrmrdtools/coils.py | 1 + ismrmrdtools/grappa.py | 3 ++- ismrmrdtools/ndarray_io.py | 1 + ismrmrdtools/sense.py | 1 + ismrmrdtools/show.py | 1 + ismrmrdtools/simulation.py | 1 + ismrmrdtools/transform.py | 5 ++-- parallel_imaging_demo.py | 33 ++++++++++++----------- recon_ismrmrd_dataset.py | 8 +++--- recon_multi_reps.py | 22 +++++---------- 12 files changed, 40 insertions(+), 39 deletions(-) diff --git a/csm_estimation_demo.py b/csm_estimation_demo.py index 574c048..88b56c3 100644 --- a/csm_estimation_demo.py +++ b/csm_estimation_demo.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- -#%% #Basic setup +from __future__ import division, print_function, absolute_import import time import numpy as np from ismrmrdtools import simulation, coils, show diff --git a/generate_cartesian_shepp_logan_dataset.py b/generate_cartesian_shepp_logan_dataset.py index 7456f02..3556646 100644 --- a/generate_cartesian_shepp_logan_dataset.py +++ b/generate_cartesian_shepp_logan_dataset.py @@ -1,4 +1,5 @@ # coding: utf-8 +from __future__ import division, print_function, absolute_import import ismrmrd import ismrmrd.xsd from ismrmrdtools import simulation, transform diff --git a/ismrmrdtools/coils.py b/ismrmrdtools/coils.py index 718fa54..a092015 100644 --- a/ismrmrdtools/coils.py +++ b/ismrmrdtools/coils.py @@ -1,6 +1,7 @@ """ Utilities for coil sensivity maps, pre-whitening, etc. """ +from __future__ import division, print_function, absolute_import import numpy as np from scipy import ndimage diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index a4d9afd..dcd0777 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -1,6 +1,7 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.fft import fftshift, ifftshift, ifftn -import coils +from . 
import coils def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), diff --git a/ismrmrdtools/ndarray_io.py b/ismrmrdtools/ndarray_io.py index e3e9342..dec9948 100644 --- a/ismrmrdtools/ndarray_io.py +++ b/ismrmrdtools/ndarray_io.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from struct import unpack diff --git a/ismrmrdtools/sense.py b/ismrmrdtools/sense.py index 778c87e..e86d2d6 100644 --- a/ismrmrdtools/sense.py +++ b/ismrmrdtools/sense.py @@ -1,6 +1,7 @@ """ Calculate SENSE unmixing coefficients """ +from __future__ import division, print_function, absolute_import import numpy as np diff --git a/ismrmrdtools/show.py b/ismrmrdtools/show.py index b3327a1..6173bfa 100644 --- a/ismrmrdtools/show.py +++ b/ismrmrdtools/show.py @@ -1,6 +1,7 @@ """ Simple tiled image display """ +from __future__ import division, print_function, absolute_import import numpy as np import matplotlib.image import matplotlib.pyplot as plt diff --git a/ismrmrdtools/simulation.py b/ismrmrdtools/simulation.py index a1251b7..012dba9 100644 --- a/ismrmrdtools/simulation.py +++ b/ismrmrdtools/simulation.py @@ -1,6 +1,7 @@ """ Tools for generating coil sensitivities and phantoms """ +from __future__ import division, print_function, absolute_import import numpy as np from ismrmrdtools import transform diff --git a/ismrmrdtools/transform.py b/ismrmrdtools/transform.py index 98393d0..ab4fbcd 100644 --- a/ismrmrdtools/transform.py +++ b/ismrmrdtools/transform.py @@ -1,6 +1,7 @@ """ Helpers for transforming data from k-space to image space and vice-versa. """ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.fft import fftshift, ifftshift, fftn, ifftn @@ -24,7 +25,7 @@ def transform_kspace_to_image(k, dim=None, img_shape=None): data in image space (along transformed dimensions) """ if dim is None: - dim = range(k.ndim) + dim = tuple(range(k.ndim)) img = fftshift( ifftn(ifftshift(k, axes=dim), s=img_shape, axes=dim), axes=dim) @@ -51,7 +52,7 @@ def transform_image_to_kspace(img, dim=None, k_shape=None): data in k-space (along transformed dimensions) """ if dim is None: - dim = range(img.ndim) + dim = tuple(range(img.ndim)) k = fftshift(fftn(ifftshift(img, axes=dim), s=k_shape, axes=dim), axes=dim) k /= np.sqrt(np.prod(np.take(img.shape, dim))) diff --git a/parallel_imaging_demo.py b/parallel_imaging_demo.py index c4e8a11..6fba4fd 100644 --- a/parallel_imaging_demo.py +++ b/parallel_imaging_demo.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- # Basic setup +from __future__ import division, print_function, absolute_import import numpy as np -import scipy as sp +import scipy.io from ismrmrdtools import sense, grappa, show, simulation, transform, coils # import some data -exercise_data = sp.io.loadmat('hansen_exercises2.mat') +exercise_data = scipy.io.loadmat('hansen_exercises2.mat') csm = np.transpose(exercise_data['smaps']) pat = np.transpose(exercise_data['sp']) data = np.transpose(exercise_data['data']) @@ -15,25 +16,25 @@ acc_factor = 4 alias_img = transform.transform_kspace_to_image( kspace, dim=(1, 2)) * np.sqrt(acc_factor) -show.imshow(abs(alias_img)) +show.imshow(np.abs(alias_img)) (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( data, acc_factor, data_mask=pat > 1, csm=csm, kernel_size=(4, 5)) # (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( # data, acc_factor, data_mask=pat>1) -show.imshow(abs(gmap_grappa), colorbar=True) +show.imshow(np.abs(gmap_grappa), colorbar=True) 
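Python 3's range() is a lazy sequence rather than a list, hence the tuple(range(...)) conversions in transform.py above. For reference, a round-trip sketch of the two transform helpers (shapes arbitrary) showing that they invert each other with the unitary scaling:

    import numpy as np
    from ismrmrdtools import transform

    img = np.random.randn(8, 128, 128) + 1j * np.random.randn(8, 128, 128)
    k = transform.transform_image_to_kspace(img, dim=(1, 2))
    img2 = transform.transform_kspace_to_image(k, dim=(1, 2))
    print(np.allclose(img, img2))  # True: the pair is unitary up to float rounding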
recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) -show.imshow(abs(recon_grappa), colorbar=True) +show.imshow(np.abs(recon_grappa), colorbar=True) -sp.io.savemat('tmp_data.mat', {'pat_py': pat, 'data_py': data, 'csm_py': csm, - 'alias_img_py': alias_img, - 'unmix_grappa_py': unmix_grappa}) +scipy.io.savemat('tmp_data.mat', {'pat_py': pat, 'data_py': data, 'csm_py': csm, + 'alias_img_py': alias_img, + 'unmix_grappa_py': unmix_grappa}) matrix_size = 256 csm = simulation.generate_birdcage_sensitivities(matrix_size) phan = simulation.phantom(matrix_size) coil_images = np.tile(phan, (8, 1, 1)) * csm -show.imshow(abs(coil_images), tile_shape=(4, 2), colorbar=True) +show.imshow(np.abs(coil_images), tile_shape=(4, 2), colorbar=True) # Undersample acc_factor = 2 @@ -57,20 +58,20 @@ # Reconstruct aliased images alias_img = transform.transform_kspace_to_image( kspace, dim=(1, 2)) * np.sqrt(acc_factor) -show.imshow(abs(alias_img)) +show.imshow(np.abs(alias_img)) (unmix_sense, gmap_sense) = sense.calculate_sense_unmixing(acc_factor, csm) -show.imshow(abs(gmap_sense), colorbar=True) +show.imshow(np.abs(gmap_sense), colorbar=True) recon_sense = np.squeeze(np.sum(alias_img * unmix_sense, 0)) -show.imshow(abs(recon_sense), colorbar=True) +show.imshow(np.abs(recon_sense), colorbar=True) # (unmix_grappa,gmap_grappa) = grappa.calculate_grappa_unmixing( # data, acc_factor, data_mask=pat>1, csm=csm,kernel_size=(2,5)) (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( data, acc_factor, data_mask=pat > 1, kernel_size=(2, 5)) -show.imshow(abs(gmap_grappa), colorbar=True) +show.imshow(np.abs(gmap_grappa), colorbar=True) recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) -show.imshow(abs(recon_grappa), colorbar=True) +show.imshow(np.abs(recon_grappa), colorbar=True) # Pseudo replica example reps = 255 @@ -87,6 +88,6 @@ reps_grappa[r, :, :] = np.squeeze(np.sum(alias_img_r * unmix_grappa, 0)) std_sense = np.std(np.real(reps_sense), 0) -show.imshow(abs(std_sense), colorbar=True) +show.imshow(np.abs(std_sense), colorbar=True) std_grappa = np.std(np.real(reps_grappa), 0) -show.imshow(abs(std_grappa), colorbar=True) +show.imshow(np.abs(std_grappa), colorbar=True) diff --git a/recon_ismrmrd_dataset.py b/recon_ismrmrd_dataset.py index 2846379..18b1d17 100644 --- a/recon_ismrmrd_dataset.py +++ b/recon_ismrmrd_dataset.py @@ -1,5 +1,5 @@ # coding: utf-8 - +from __future__ import division, print_function, absolute_import import os import ismrmrd import ismrmrd.xsd @@ -35,17 +35,17 @@ # Number of Slices, Reps, Contrasts, etc. ncoils = header.acquisitionSystemInformation.receiverChannels -if enc.encodingLimits.slice != None: +if enc.encodingLimits.slice is not None: nslices = enc.encodingLimits.slice.maximum + 1 else: nslices = 1 -if enc.encodingLimits.repetition != None: +if enc.encodingLimits.repetition is not None: nreps = enc.encodingLimits.repetition.maximum + 1 else: nreps = 1 -if enc.encodingLimits.contrast != None: +if enc.encodingLimits.contrast is not None: ncontrasts = enc.encodingLimits.contrast.maximum + 1 else: ncontrasts = 1 diff --git a/recon_multi_reps.py b/recon_multi_reps.py index 4b96477..344b7b4 100644 --- a/recon_multi_reps.py +++ b/recon_multi_reps.py @@ -4,8 +4,7 @@ @author: Michael S. 
Hansen """ - -#%% +from __future__ import division, print_function, absolute_import import os import ismrmrd import ismrmrd.xsd @@ -13,7 +12,6 @@ from ismrmrdtools import show, transform, coils, grappa -#%% # Convert data from siemens file with # siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 1 -o data_reps_noise.h5 # siemens_to_ismrmrd -f meas_MID00032_FID22409_oil_gre_128_150reps_pause_alpha_10.dat -z 2 -o data_reps_data.h5 @@ -25,15 +23,12 @@ filename_noise = 'tpat3_noise.h5' filename_data = 'tpat3_data.h5' - -#%% # Read the noise data if not os.path.isfile(filename_noise): print("%s is not a valid file" % filename_noise) raise SystemExit noise_dset = ismrmrd.Dataset(filename_noise, 'dataset', create_if_needed=False) -#%% # Process the noise data noise_reps = noise_dset.number_of_acquisitions() a = noise_dset.read_acquisition(0) @@ -54,8 +49,7 @@ noise = noise.astype('complex64') -#%% Read the actual data -# Read the noise data +# Read the actual data if not os.path.isfile(filename_data): print("%s is not a valid file" % filename_data) raise SystemExit @@ -85,17 +79,17 @@ # Number of Slices, Reps, Contrasts, etc. ncoils = header.acquisitionSystemInformation.receiverChannels -if enc.encodingLimits.slice != None: +if enc.encodingLimits.slice is not None: nslices = enc.encodingLimits.slice.maximum + 1 else: nslices = 1 -if enc.encodingLimits.repetition != None: +if enc.encodingLimits.repetition is not None: nreps = enc.encodingLimits.repetition.maximum + 1 else: nreps = 1 -if enc.encodingLimits.contrast != None: +if enc.encodingLimits.contrast is not None: ncontrasts = enc.encodingLimits.contrast.maximum + 1 else: ncontrasts = 1 @@ -118,10 +112,9 @@ data_dwell_time = a.sample_time_us noise_receiver_bw_ratio = 0.79 dmtx = coils.calculate_prewhitening( - noise, scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio) - + noise, + scale_factor=(data_dwell_time/noise_dwell_time)*noise_receiver_bw_ratio) -#%% # Process the actual data all_data = np.zeros( (nreps, ncontrasts, nslices, ncoils, eNz, eNy, rNx), dtype=np.complex64) @@ -153,7 +146,6 @@ all_data = all_data.astype('complex64') -#%% # Coil combination coil_images = transform.transform_kspace_to_image( np.squeeze(np.mean(all_data, 0)), (1, 2)) From 2601f4599abf5ad1d1ad4697229f50237a6bb7ec Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Mon, 12 Oct 2015 23:46:04 -0500 Subject: [PATCH 06/15] STY: omit unnecessary '0, ' from range statements --- ismrmrdtools/grappa.py | 16 ++++++++-------- ismrmrdtools/ndarray_io.py | 4 ++-- ismrmrdtools/sense.py | 4 ++-- ismrmrdtools/simulation.py | 6 +++--- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index dcd0777..81ae049 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -74,7 +74,7 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), kx_cal = (sampled_indices[1][0], sampled_indices[1][-1]) ky_cal = (sampled_indices[0][0], sampled_indices[0][-1]) - for s in range(0, acc_factor): + for s in range(acc_factor): kernel_mask = np.zeros( (kernel_size[0]*acc_factor, kernel_size[1]), dtype=np.int8) kernel_mask[s:kernel_mask.shape[0]:acc_factor, :] = 1 @@ -95,7 +95,7 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), unmix = np.zeros(source_data.shape, dtype=np.complex64) - for c in range(0, nc_target): + for c in range(nc_target): kernel_pad = _pad_kernel(kernel[c, :, :, :], unmix.shape) kernel_pad = fftshift( ifftn(ifftshift(kernel_pad, axes=(1, 2)), axes=(1, 2)), axes=(1, 2)) @@ -163,15 +163,15 @@ def estimate_convolution_kernel(source_data, kernel_mask, A = np.asmatrix(np.zeros((equations, unknowns), dtype=np.complex128)) b = np.asmatrix(np.zeros((equations, nc_target), dtype=np.complex128)) - for sc in range(0, nc_source): - for p in range(0, offsets.shape[0]): + for sc in range(nc_source): + for p in range(offsets.shape[0]): yslice = slice(ky_range[0]+offsets[p, 0], ky_range[1]+offsets[p, 0]) xslice = slice(kx_range[0]+offsets[p, 1], kx_range[1]+offsets[p, 1]) A[:, sc*offsets.shape[0]+p] = source_data[ sc, yslice, xslice].reshape((equations, 1)) - for tc in range(0, nc_target): + for tc in range(nc_target): b[:, tc] = target_data[ tc, ky_range[0]:ky_range[1], kx_range[0]:kx_range[1]].reshape((equations, 1)) @@ -188,9 +188,9 @@ def estimate_convolution_kernel(source_data, kernel_mask, offsets = np.argwhere(kernel_mask == 1) kernel = np.zeros((nc_target, nc_source, kernel_mask.shape[ 0], kernel_mask.shape[1]), dtype=np.complex64) - for tc in range(0, nc_target): - for sc in range(0, nc_source): - for p in range(0, offsets.shape[0]): + for tc in range(nc_target): + for sc in range(nc_source): + for p in range(offsets.shape[0]): kernel[tc, sc, offsets[p, 0], offsets[p, 1]] = x[ sc*offsets.shape[0]+p, tc] diff --git a/ismrmrdtools/ndarray_io.py b/ismrmrdtools/ndarray_io.py index dec9948..9938368 100644 --- a/ismrmrdtools/ndarray_io.py +++ b/ismrmrdtools/ndarray_io.py @@ -38,7 +38,7 @@ def write_ndarray(filename, ndarray): f = open(fullfilename, 'wb') dims = np.zeros((ndarray.ndim+1, 1), dtype=np.int32) dims[0] = ndarray.ndim - for d in range(0, ndarray.ndim): + for d in range(ndarray.ndim): dims[d+1] = ndarray.shape[ndarray.ndim-d-1] f.write(dims.tobytes()) f.write(ndarray.tobytes()) @@ -82,7 +82,7 @@ def read_ndarray(filename): ndims = f.read(4) ndims = unpack(' 0: # unmix1d[:,b:ny:nblocks] = np.linalg.pinv(A) diff --git a/ismrmrdtools/simulation.py b/ismrmrdtools/simulation.py index 012dba9..5f8728c 100644 --- a/ismrmrdtools/simulation.py +++ b/ismrmrdtools/simulation.py @@ -92,14 +92,14 @@ def generate_birdcage_sensitivities(matrix_size=256, number_of_coils=8, out = np.zeros( (number_of_coils, matrix_size, matrix_size), dtype=np.complex64) - for c in range(0, number_of_coils): + for c in range(number_of_coils): coilx = 
relative_radius*np.cos(c*(2*np.pi/number_of_coils)) coily = relative_radius*np.sin(c*(2*np.pi/number_of_coils)) coil_phase = -c*(2*np.pi/number_of_coils) - for y in range(0, matrix_size): + for y in range(matrix_size): y_co = float(y-matrix_size/2)/float(matrix_size/2)-coily - for x in range(0, matrix_size): + for x in range(matrix_size): x_co = float(x-matrix_size/2)/float(matrix_size/2)-coilx rr = np.sqrt(x_co**2+y_co**2) phi = np.arctan2(x_co, -y_co) + coil_phase From 7ede52459433935f2ca131087964462ba220cb80 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Mon, 12 Oct 2015 23:58:50 -0500 Subject: [PATCH 07/15] MAINT: replace numpy matrices with arrays --- ismrmrdtools/coils.py | 11 ++++++----- ismrmrdtools/grappa.py | 8 ++++---- ismrmrdtools/sense.py | 8 ++++---- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/ismrmrdtools/coils.py b/ismrmrdtools/coils.py index a092015..e35af0a 100644 --- a/ismrmrdtools/coils.py +++ b/ismrmrdtools/coils.py @@ -27,9 +27,10 @@ def calculate_prewhitening(noise, scale_factor=1.0): ----- ``scale_factor = (T_acq_dwell/T_noise_dwell)*NoiseReceiverBandwidthRatio`` """ + noise = np.asanyarray(noise) noise_int = noise.reshape((noise.shape[0], noise.size/noise.shape[0])) M = float(noise_int.shape[1]) - dmtx = (1/(M-1))*np.asmatrix(noise_int)*np.asmatrix(noise_int).H + dmtx = (1/(M-1))*np.dot(noise_int, np.conj(noise_int.T)) dmtx = np.linalg.inv(np.linalg.cholesky(dmtx)) dmtx = dmtx*np.sqrt(2)*np.sqrt(scale_factor) return dmtx @@ -50,11 +51,11 @@ def apply_prewhitening(data, dmtx): w_data : (coil, ...) array Prewhitened data. """ - + data = np.asanyarray(data) + dmtx = np.asanyarray(dmtx) s = data.shape - return np.asarray( - np.asmatrix(dmtx) * np.asmatrix( - data.reshape(data.shape[0], data.size/data.shape[0]))).reshape(s) + return np.dot( + dmtx, data.reshape(data.shape[0], data.size/data.shape[0])).reshape(s) def calculate_csm_walsh(img, smoothing=5, niter=3): diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 81ae049..8a459e8 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -160,8 +160,8 @@ def estimate_convolution_kernel(source_data, kernel_mask, equations = (ky_range[1]-ky_range[0])*(kx_range[1]-kx_range[0]) unknowns = offsets.shape[0]*nc_source - A = np.asmatrix(np.zeros((equations, unknowns), dtype=np.complex128)) - b = np.asmatrix(np.zeros((equations, nc_target), dtype=np.complex128)) + A = np.zeros((equations, unknowns), dtype=np.complex128) + b = np.zeros((equations, nc_target), dtype=np.complex128) for sc in range(nc_source): for p in range(offsets.shape[0]): @@ -181,8 +181,8 @@ def estimate_convolution_kernel(source_data, kernel_mask, "insufficient") S = np.linalg.svd(A, compute_uv=False) - A_inv = np.dot(np.linalg.pinv(np.dot(A.H, A) + np.eye(A.shape[1]) * - (regularization_factor*np.max(np.abs(S)))**2), A.H) + A_inv = np.dot(np.linalg.pinv(np.dot(np.conj(A.T), A) + np.eye(A.shape[1]) * + (regularization_factor*np.max(np.abs(S)))**2), np.conj(A.T)) x = np.dot(A_inv, b) offsets = np.argwhere(kernel_mask == 1) diff --git a/ismrmrdtools/sense.py b/ismrmrdtools/sense.py index f917cfa..81959a6 100644 --- a/ismrmrdtools/sense.py +++ b/ismrmrdtools/sense.py @@ -54,13 +54,13 @@ def _calculate_sense_unmixing_1d(acc_factor, csm1d, regularization_factor): nblocks = ny/acc_factor for b in range(nblocks): - A = np.matrix(csm1d[:, b:ny:nblocks]).T + A = csm1d[:, b:ny:nblocks].T if np.max(np.abs(A)) > 0: # unmix1d[:,b:ny:nblocks] = np.linalg.pinv(A) - AHA = A.H * A + AHA = np.dot(np.conj(A.T), A) reduced_eye = 
np.diag(np.abs(np.diag(AHA)) > 0) n_alias = np.sum(reduced_eye) scaled_reg_factor = regularization_factor * np.trace(AHA)/n_alias - unmix1d[:, b:ny:nblocks] = np.linalg.pinv( - AHA + (reduced_eye*scaled_reg_factor)) * A.H + unmix1d[:, b:ny:nblocks] = np.dot(np.linalg.pinv( + AHA + (reduced_eye*scaled_reg_factor)), np.conj(A.T)) return unmix1d From 00f8f4201ce60cdd4ed225ff9128a9c56c646243 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Sat, 17 Oct 2015 20:36:32 -0400 Subject: [PATCH 08/15] BUG: fix integer division for Python 3 compatibility --- generate_cartesian_shepp_logan_dataset.py | 8 ++++---- ismrmrdtools/sense.py | 4 ++-- recon_ismrmrd_dataset.py | 6 +++--- recon_multi_reps.py | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/generate_cartesian_shepp_logan_dataset.py b/generate_cartesian_shepp_logan_dataset.py index 3556646..25f5026 100644 --- a/generate_cartesian_shepp_logan_dataset.py +++ b/generate_cartesian_shepp_logan_dataset.py @@ -17,7 +17,7 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, # Oversample if needed if oversampling > 1: - padding = (oversampling*phan.shape[1] - phan.shape[1])/2 + padding = (oversampling*phan.shape[1] - phan.shape[1]) // 2 phan = np.pad(phan, ((0, 0), (padding, padding)), mode='constant') csm = np.pad( csm, ((0, 0), (0, 0), (padding, padding)), mode='constant') @@ -85,13 +85,13 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, limits1 = ismrmrd.xsd.limitType() limits1.minimum = 0 - limits1.center = ny/2 + limits1.center = ny // 2 limits1.maximum = ny - 1 limits.kspace_encoding_step_1 = limits1 limits_rep = ismrmrd.xsd.limitType() limits_rep.minimum = 0 - limits_rep.center = repetitions / 2 + limits_rep.center = repetitions // 2 limits_rep.maximum = repetitions - 1 limits.repetition = limits_rep @@ -121,7 +121,7 @@ def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, acq.resize(nkx, coils) acq.version = 1 acq.available_channels = coils - acq.center_sample = nkx/2 + acq.center_sample = nkx // 2 acq.read_dir[0] = 1.0 acq.phase_dir[1] = 1.0 acq.slice_dir[2] = 1.0 diff --git a/ismrmrdtools/sense.py b/ismrmrdtools/sense.py index 81959a6..17054bb 100644 --- a/ismrmrdtools/sense.py +++ b/ismrmrdtools/sense.py @@ -52,7 +52,7 @@ def _calculate_sense_unmixing_1d(acc_factor, csm1d, regularization_factor): unmix1d = np.zeros((nc, ny), dtype=np.complex64) - nblocks = ny/acc_factor + nblocks = ny // acc_factor for b in range(nblocks): A = csm1d[:, b:ny:nblocks].T if np.max(np.abs(A)) > 0: @@ -60,7 +60,7 @@ def _calculate_sense_unmixing_1d(acc_factor, csm1d, regularization_factor): AHA = np.dot(np.conj(A.T), A) reduced_eye = np.diag(np.abs(np.diag(AHA)) > 0) n_alias = np.sum(reduced_eye) - scaled_reg_factor = regularization_factor * np.trace(AHA)/n_alias + scaled_reg_factor = regularization_factor * np.trace(AHA) / n_alias unmix1d[:, b:ny:nblocks] = np.dot(np.linalg.pinv( AHA + (reduced_eye*scaled_reg_factor)), np.conj(A.T)) return unmix1d diff --git a/recon_ismrmrd_dataset.py b/recon_ismrmrd_dataset.py index 18b1d17..b80ea7a 100644 --- a/recon_ismrmrd_dataset.py +++ b/recon_ismrmrd_dataset.py @@ -78,11 +78,11 @@ # Remove oversampling if needed if eNx != rNx: xline = transform.transform_kspace_to_image(acq.data, [1]) - x0 = (eNx - rNx) / 2 - x1 = (eNx - rNx) / 2 + rNx + x0 = (eNx - rNx) // 2 + x1 = (eNx - rNx) // 2 + rNx xline = xline[:, x0:x1] acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions) - acq.center_sample = rNx/2 + 
acq.center_sample = rNx // 2 # need to use the [:] notation here to fill the data acq.data[:] = transform.transform_image_to_kspace(xline, [1]) diff --git a/recon_multi_reps.py b/recon_multi_reps.py index 344b7b4..83b243c 100644 --- a/recon_multi_reps.py +++ b/recon_multi_reps.py @@ -128,11 +128,11 @@ # Remove oversampling if needed if eNx != rNx: xline = transform.transform_kspace_to_image(acq_data_prw, [1]) - x0 = (eNx - rNx) / 2 - x1 = (eNx - rNx) / 2 + rNx + x0 = (eNx - rNx) // 2 + x1 = (eNx - rNx) // 2 + rNx xline = xline[:, x0:x1] acq.resize(rNx, acq.active_channels, acq.trajectory_dimensions) - acq.center_sample = rNx/2 + acq.center_sample = rNx // 2 # need to use the [:] notation here to fill the data acq.data[:] = transform.transform_image_to_kspace(xline, [1]) From a76dd1d132be9660a0b6945964f261893acc8482 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Sat, 17 Oct 2015 21:38:55 -0400 Subject: [PATCH 09/15] BUG: fix shape error in grappa.py --- ismrmrdtools/grappa.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 8a459e8..a6a4caf 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -170,11 +170,11 @@ def estimate_convolution_kernel(source_data, kernel_mask, xslice = slice(kx_range[0]+offsets[p, 1], kx_range[1]+offsets[p, 1]) A[:, sc*offsets.shape[0]+p] = source_data[ - sc, yslice, xslice].reshape((equations, 1)) + sc, yslice, xslice].reshape((equations, )) for tc in range(nc_target): b[:, tc] = target_data[ tc, ky_range[0]:ky_range[1], - kx_range[0]:kx_range[1]].reshape((equations, 1)) + kx_range[0]:kx_range[1]].reshape((equations, )) if A.shape[0] < 3*A.shape[1]: print("Warning: number of samples in calibration data might be " From 9818380032402b05185315361b71ed428ffd6b1a Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Mon, 19 Oct 2015 12:29:54 -0400 Subject: [PATCH 10/15] BUG: fix missing comma in __init__.py --- ismrmrdtools/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ismrmrdtools/__init__.py b/ismrmrdtools/__init__.py index da952cb..6cf6033 100644 --- a/ismrmrdtools/__init__.py +++ b/ismrmrdtools/__init__.py @@ -2,5 +2,5 @@ ISMRMRD Python Tools """ -__all__ = ["sense", "transform", "show", "simulation", "coils", "grappa" +__all__ = ["sense", "transform", "show", "simulation", "coils", "grappa", "ndarray_io"] From 51408fa04c79d2c485e0fc23afaee04dc1244434 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Fri, 4 Dec 2015 11:26:39 -0500 Subject: [PATCH 11/15] BUG: fix integer divison in ismrmrdtools/grappa.py --- ismrmrdtools/grappa.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index a6a4caf..975dbce 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -150,8 +150,8 @@ def estimate_convolution_kernel(source_data, kernel_mask, nc_target = target_data.shape[0] offsets = np.argwhere(kernel_mask == 1) - offsets[:, 0] -= kernel_mask.shape[0]/2 - offsets[:, 1] -= kernel_mask.shape[1]/2 + offsets[:, 0] -= kernel_mask.shape[0]//2 + offsets[:, 1] -= kernel_mask.shape[1]//2 ky_range = ( 0-np.min(offsets[:, 0]), source_data.shape[1]-np.max(offsets[:, 0])) kx_range = ( From dceea51e8ff0bde578754b04bead62af57316e93 Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Fri, 4 Dec 2015 11:28:31 -0500 Subject: [PATCH 12/15] MAINT: remove the redundant .mat file section from parallel_imaging_demo --- parallel_imaging_demo.py | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/parallel_imaging_demo.py b/parallel_imaging_demo.py index 6fba4fd..0f8b86d 100644 --- a/parallel_imaging_demo.py +++ b/parallel_imaging_demo.py @@ -3,33 +3,9 @@ # Basic setup from __future__ import division, print_function, absolute_import import numpy as np -import scipy.io from ismrmrdtools import sense, grappa, show, simulation, transform, coils -# import some data -exercise_data = scipy.io.loadmat('hansen_exercises2.mat') -csm = np.transpose(exercise_data['smaps']) -pat = np.transpose(exercise_data['sp']) -data = np.transpose(exercise_data['data']) -kspace = np.logical_or(pat == 1, pat == 3).astype('float32')*(data) - -acc_factor = 4 -alias_img = transform.transform_kspace_to_image( - kspace, dim=(1, 2)) * np.sqrt(acc_factor) -show.imshow(np.abs(alias_img)) - -(unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( - data, acc_factor, data_mask=pat > 1, csm=csm, kernel_size=(4, 5)) -# (unmix_grappa, gmap_grappa) = grappa.calculate_grappa_unmixing( -# data, acc_factor, data_mask=pat>1) -show.imshow(np.abs(gmap_grappa), colorbar=True) -recon_grappa = np.squeeze(np.sum(alias_img * unmix_grappa, 0)) -show.imshow(np.abs(recon_grappa), colorbar=True) - -scipy.io.savemat('tmp_data.mat', {'pat_py': pat, 'data_py': data, 'csm_py': csm, - 'alias_img_py': alias_img, - 'unmix_grappa_py': unmix_grappa}) - +# generate some data matrix_size = 256 csm = simulation.generate_birdcage_sensitivities(matrix_size) phan = simulation.phantom(matrix_size) From 804faa4a98e527a20a2bdfca0ca900f6a6e3ebd0 Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Fri, 4 Dec 2015 11:46:56 -0500 Subject: [PATCH 13/15] MAINT: add comment on data source for recon_ismrmrd_dataset.py --- recon_ismrmrd_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/recon_ismrmrd_dataset.py b/recon_ismrmrd_dataset.py index b80ea7a..5f6669c 100644 --- a/recon_ismrmrd_dataset.py +++ b/recon_ismrmrd_dataset.py @@ -8,6 +8,8 @@ from ismrmrdtools import show, transform # Load file +# Can generate testdata.h5 using ismrmrd via: +# ismrmrd_generate_cartesian_shepp_logan filename = '/tmp/testdata.h5' if not os.path.isfile(filename): print("%s is not a valid file" % filename) From 9d91b632f6079ef99d0032b99f7072da401d89af Mon Sep 17 00:00:00 2001 From: "Gregory R. Lee" Date: Fri, 4 Dec 2015 11:51:25 -0500 Subject: [PATCH 14/15] DOC: fix typo in calculate_grapp_unmixing docstring --- ismrmrdtools/grappa.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 975dbce..89e0596 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -22,7 +22,7 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), all of the source data. csm : (coil, y, x) array or None, optional Coil sensitivity map. (used for b1-weighted combining. Will be - estimated from calibratino data if not supplied.) + estimated from calibration data if not supplied.) regularization_factor : float, optional Tikhonov regularization weight. - 0 = no regularization From cded37b0e7adede7799d16e730696dbf26fab4be Mon Sep 17 00:00:00 2001 From: "Gregory R. 
Lee" Date: Fri, 4 Dec 2015 11:59:44 -0500 Subject: [PATCH 15/15] MAINT: use array broadcasting rather than asmatrix in calculate_grappa_unmixing --- ismrmrdtools/grappa.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ismrmrdtools/grappa.py b/ismrmrdtools/grappa.py index 89e0596..cd5a267 100644 --- a/ismrmrdtools/grappa.py +++ b/ismrmrdtools/grappa.py @@ -53,14 +53,14 @@ def calculate_grappa_unmixing(source_data, acc_factor, kernel_size=(4, 5), if csm is None: # Assume calibration data is in the middle - f = np.asarray(np.asmatrix(np.hamming(np.max( - np.sum(data_mask, 0)))).T * np.asmatrix( - np.hamming(np.max(np.sum(data_mask, 1))))) + hy = np.hamming(np.max(np.sum(data_mask, 0))) + hx = np.hamming(np.max(np.sum(data_mask, 1))) + f = hy[:, np.newaxis] * hx[np.newaxis, :] fmask = np.zeros( (source_data.shape[1], source_data.shape[2]), dtype=np.complex64) idx = np.argwhere(data_mask == 1) fmask[idx[:, 0], idx[:, 1]] = f.reshape(idx.shape[0]) - fmask = np.tile(fmask[None, :, :], (nc_source, 1, 1)) + fmask = np.tile(fmask[np.newaxis, :, :], (nc_source, 1, 1)) csm = fftshift( ifftn( ifftshift(source_data * fmask, axes=(1, 2)),