diff --git a/pymbar/confidenceintervals.py b/pymbar/confidenceintervals.py
index 0a196bb9..7198a50e 100644
--- a/pymbar/confidenceintervals.py
+++ b/pymbar/confidenceintervals.py
@@ -148,6 +148,7 @@ def qq_plot(replicates, K, title="Generic Q-Q plot", filename="qq.pdf"):
     N = len(replicates)
     dim = len(np.shape(replicates[0]["error"]))
     xvals = scipy.stats.norm.ppf((np.arange(0, N) + 0.5) / N)  # inverse pdf
+    nplots = None  # keep lint happy when variables are defined in conditionals
 
     if dim == 0:
         nplots = 1
diff --git a/pymbar/fes.py b/pymbar/fes.py
index b1dbbd05..9c95f3ca 100644
--- a/pymbar/fes.py
+++ b/pymbar/fes.py
@@ -1,7 +1,7 @@
 ##############################################################################
 # pymbar: A Python Library for MBAR (FES module)
 #
-# Copyright 2019 University of Colorado Boulder
+# Copyright 2019-2024 University of Colorado Boulder
 #
 # Authors: Michael Shirts
 #
@@ -399,11 +399,11 @@ def generate_fes(
                         0, N_k[k], size=N_k[k]
                     )
                     index += N_k[k]
-                    # recompute MBAR.
-                    mbar = pymbar.MBAR(
-                        self.u_kn[:, bootstrap_indices], self.N_k, initial_f_k=self.mbar.f_k
-                    )
-                    x_nb = x_n[bootstrap_indices]
+                # recompute MBAR.
+                mbar = pymbar.MBAR(
+                    self.u_kn[:, bootstrap_indices], self.N_k, initial_f_k=self.mbar.f_k
+                )
+                x_nb = x_n[bootstrap_indices]
 
             # Compute unnormalized log weights for the given reduced potential
             # u_n, needed for all methods.
@@ -692,8 +692,9 @@ def _generate_fes_kde(self, b, x_n, w_n):
             params = self.kde.get_params()  # pylint: disable=access-member-before-definition
             kde.set_params(**params)
         else:
-            kde = self.kde
-        kde.fit(x_n, sample_weight=self.w_n)
+            kde = self.kde  # use these new weights for the KDE
+            w_n = self.w_n
+        kde.fit(x_n, sample_weight=w_n)
 
         if b > 0:
             self.kdes.append(kde)
@@ -1358,6 +1359,7 @@ def _get_fes_histogram(
                 raise ParameterError("Specified reference point for FES not given")
 
         if reference_point in ["from-lowest", "from-specified", "all-differences"]:
+            j = None  # keep lint happy
             if reference_point == "from-lowest":
                 # Determine free energy with lowest free energy to serve as reference point
                 j = histogram_data["f"].argmin()
@@ -1415,8 +1417,8 @@ def _get_fes_histogram(
                 fall = np.zeros([len(histogram_data["f"]), n_bootstraps])
                 for b in range(n_bootstraps):
                     h = histogram_datas[b]  # just to make this shorter
-                    fall[:, b] = h["f"] - h["f"][j]
-                df_i = np.std(fall, axis=1)
+                    fall[:, b] = h["f"] - h["f"][j]  # subtract out the reference bin
+                df_i = np.std(fall, ddof=1, axis=1)
 
         elif reference_point == "from-normalization":
             # Determine uncertainties from normalization that \sum_i p_i = 1.
@@ -1510,7 +1512,7 @@ def _get_fes_histogram(
                     fall[i, j, b] = (
                         histogram_datas[b]["f"] - histogram_datas[b]["f"].transpose()
                     )
-                dfxij_vals = np.std(fall, axis=2)
+                dfxij_vals = np.std(fall, ddof=1, axis=2)
             if uncertainty_method is not None:
                 # Return dimensionless free energy and uncertainty.
                 result_vals["df_ij"] = dfxij_vals
@@ -1565,9 +1567,15 @@ def _get_fes_kde(
         if reference_point == "from-lowest":
             fmin = np.min(f_i)
             f_i = f_i - fmin
+            wheremin = np.argmin(
+                f_i
+            )  # needed for uncertainties, as we zero by location, not values
         elif reference_point == "from-specified":
             fmin = -self.kde.score_samples(np.array(fes_reference).reshape(1, -1))
             f_i = f_i - fmin
+            wheremin = np.argmin(
+                f_i
+            )  # needed for uncertainties, as we zero by location, not values
         elif reference_point == "from-normalization":
             # uncertainites "from normalization" reference is already applied, since
             # the density is normalized.
@@ -1594,8 +1602,9 @@ def _get_fes_kde(
             fall = np.zeros([len(x), n_bootstraps])
             for b in range(n_bootstraps):
-                fall[:, b] = -self.kdes[b].score_samples(x) - fmin
-            df_i = np.std(fall, axis=1)
+                fall[:, b] = -self.kdes[b].score_samples(x)
+                fall[:, b] -= fall[wheremin, b]
+            df_i = np.std(fall, ddof=1, axis=1)
         else:
             raise ParameterError(
                 f"Uncertainty method {uncertainty_method} for kde is not implemented"
             )
@@ -1681,7 +1690,7 @@ def _get_fes_spline(
             fall = np.zeros(dim_breakdown)
             for b in range(n_bootstraps):
                 fall[:, b] = self.fes_functions[b](x) - fmin
-            df_i = np.std(fall, axis=-1)
+            df_i = np.std(fall, ddof=1, axis=-1)
 
         # uncertainites "from normalization" reference is applied, since
         # the density is normalized.
diff --git a/pymbar/mbar.py b/pymbar/mbar.py
index 893a46a9..bea47f16 100644
--- a/pymbar/mbar.py
+++ b/pymbar/mbar.py
@@ -851,6 +851,7 @@ def compute_expectations_inner(
         K = self.K
         N = self.N  # N is total number of samples
         result_vals = dict()  # dictionary we will store uncertainties in
+        Theta_ij = None  # keep lint happy when variables are defined in conditionals.
 
         # make observables all positive, allowing us to take the logarithm, which is
         # required to prevent overflow in some examples.
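
The statistical substance of this patch is in the bootstrap-uncertainty hunks: np.std now uses ddof=1 (the n - 1 denominator over bootstrap replicates rather than NumPy's population default), and each replicate is re-zeroed at the same reference bin (fall[:, b] -= fall[wheremin, b], fall[:, b] = h["f"] - h["f"][j]) before the spread is taken. Below is a minimal standalone sketch of both ideas, not part of the patch; the data and shapes are made up, and only the names fall and wheremin are taken from the hunks above.

import numpy as np

rng = np.random.default_rng(0)

# Hypothetical bootstrap free-energy estimates: one row per bin, one column
# per bootstrap replicate.
n_bins, n_bootstraps = 4, 200
fall = rng.normal(loc=[[0.0], [1.0], [2.5], [4.0]], scale=0.3, size=(n_bins, n_bootstraps))

# Re-zero every replicate by its value in a fixed reference bin (zero by
# location, not by each replicate's own minimum), mirroring
# fall[:, b] -= fall[wheremin, b] in the diff.
wheremin = 0
fall = fall - fall[wheremin, :]

# ddof=1 applies the Bessel-corrected (n - 1) denominator across replicates;
# the previous default (ddof=0) divides by n and slightly underestimates the
# spread when the number of bootstraps is small.
df_population = np.std(fall, axis=1)        # divides by n_bootstraps
df_sample = np.std(fall, ddof=1, axis=1)    # divides by n_bootstraps - 1

print(df_population)
print(df_sample)  # larger by sqrt(n_bootstraps / (n_bootstraps - 1))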