diff --git a/CONTRIBUTING.Rmd b/CONTRIBUTING.Rmd
index ad189213..c1e7ab2a 100644
--- a/CONTRIBUTING.Rmd
+++ b/CONTRIBUTING.Rmd
@@ -18,17 +18,17 @@ All contributors retain the original copyright to their stuff, but by contributi
 
 **New code file:** At the top of the file, please ensure copyright and year is attributed to collaborator and assign the Apache 2.0 license
 
-**Major addition to code file:** “Copyright YYYY Province of British Columbia” and Apache 2.0 remains as a header and either the second collaborator is added if there are changes throughout the code or copyright is listed for specific lines of code. So it could read:
+**Major addition to code file:** “Copyright 2018-2024 Province of British Columbia” and the Apache 2.0 license remain as a header, and either the second collaborator is added if there are changes throughout the code, or copyright is listed for specific lines of code. So it could read:
 
-Copyright 2020-2022 Province of British Columbia
+Copyright 2018-2024 Province of British Columbia
 Copyright 2024 Collaborator
 Apache 2.0 License
 
 Or
 
-Copyright 2020-2022 Province of British Columbia
+Copyright 2018-2024 Province of British Columbia
 Copyright 2024 Collaborator Lines 200-500
 Apache 2.0 License
 
-**Minor changes:** If there are small changes to the code throughout the file then it may be easiest to keep these files as Copyright YYYY Province of British Columbia.
+**Minor changes:** If there are small changes to the code throughout the file, then it may be easiest to keep these files as Copyright 2018-2024 Province of British Columbia.
 However, the contribution will be tracked through GitHub.
diff --git a/R/licensing.R b/R/licensing.R
index 8c134487..050389c1 100644
--- a/R/licensing.R
+++ b/R/licensing.R
@@ -7,11 +7,9 @@
 #' licensing_md()
 licensing_md <- function() {
   "## Licensing
-
-  Copyright 2024 Province of British Columbia,
-  Environment and Climate Change Canada, and
-  Australian Government Department of
-  Climate Change, Energy, the Environment and Water
+  Copyright 2018-2024 Province of British Columbia\\
+  Copyright 2021 Environment and Climate Change Canada\\
+  Copyright 2023-2024 Australian Government Department of Climate Change, Energy, the Environment and Water
 
   The documentation is released under the
   [CC BY 4.0 License](https://creativecommons.org/licenses/by/4.0/)
diff --git a/_pkgdown.yml b/_pkgdown.yml
index c65275f0..a6db8ccb 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -119,3 +119,4 @@ articles:
     - articles/confidence-intervals
     - embelishing-plots
     - additional-technical-details
+    - faqs
diff --git a/paper/paper.bib b/paper/paper.bib
index 72a3bc8e..f55fa85b 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -122,14 +122,16 @@ @article{thorley2018ssdtools
   volume={3},
   number={31},
   pages={1082},
-  year={2018}
+  year={2018},
+  url = {https://joss.theoj.org/papers/10.21105/joss.01082}
 }
 
 @article{barry2012burrlioz,
   title={Burrlioz 2.0 Manual},
   author={Barry, S and Henderson, B},
-  journal={Commonwealth Science and Industrical Research Organisation.[cited 2017 September 14]. Availab le from: https://research. csiro. au/software/burrlioz},
-  year={2012}
+  journal={Commonwealth Scientific and Industrial Research Organisation},
+  year={2012},
+  url={https://research.csiro.au/software/burrlioz}
 }
 
 @article{fox_recent_2021,
@@ -180,6 +182,7 @@ @techreport{Warne2018
   title = {{Revised Method for Deriving Australian and New Zealand Water Quality Guideline Values for Toxicants} – update of 2015 version.},
   year = {2018},
   publisher = {{Prepared for the Revision of the Australian and New Zealand Guidelines for Fresh and Marine Water Quality. Australian and New Zealand Governments and Australian state and territory governments, Canberra}, 48 pp},
+  url = {https://www.waterquality.gov.au/sites/default/files/documents/warne-wqg-derivation2018.pdf}
 }
 
 @article{lepper2005manual,
@@ -188,7 +191,8 @@ @article{lepper2005manual
   journal={Schmallenberg, Germany: Fraunhofer-Institute Molecular Biology and Applied Ecology},
   volume={15},
   pages={51--52},
-  year={2005}
+  year={2005},
+  url = {https://www.helpdeskwater.nl/publish/pages/131519/_16__manual_on_the_methodological_framework_to_derive_environmental_quality_standards_for_priori.pdf}
 }
 
 @techreport{bcmecc2019,
@@ -196,6 +200,7 @@ @techreport{bcmecc2019
   title = {{Derivation of Water Quality Guidelines for the Protection of Aquatic Life in British Columbia}. Water Quality Guideline Series, WQG‐06},
   year = {2019},
   publisher = {Province of British Columbia, Victoria, BC, Canada},
+  url = {https://www2.gov.bc.ca/assets/gov/environment/air-land-water/water/waterquality/water-quality-guidelines/derivation-protocol/bc_wqg_aquatic_life_derivation_protocol.pdf}
 }
 
 @techreport{USEPA2020,
diff --git a/vignettes/articles/confidence-intervals.Rmd b/vignettes/articles/confidence-intervals.Rmd
index 9f8fa020..4bd1075e 100644
--- a/vignettes/articles/confidence-intervals.Rmd
+++ b/vignettes/articles/confidence-intervals.Rmd
@@ -24,129 +24,121 @@ knitr::opts_chunk$set(
 Bootstrapping is a resampling technique used to obtain estimates of summary statistics. The team have explored the use of alternative methods for obtaining the confidence interval of *HCx* estimates.
-This included using the closed-form expression for the variance-covariance matrix of the parameters
-of the Burr III distribution, coupled with the delta-method, as well as an alternative bootstrap method
-for the inverse Pareto distribution based on statistical properties of the parameters [@fox_methodologies_2021]. In both cases, it
-appeared that these methods can give results similar to other traditional bootstrapping approaches
-in much less time, and are therefore potentially worth further investigation. However,
-implementation of such methods across all the distributions now available in ssdtools would be a
-substantial undertaking.
+This included using the closed-form expression for the variance-covariance matrix of the parameters of the Burr III distribution, coupled with the delta-method, as well as an alternative bootstrap method for the inverse Pareto distribution based on statistical properties of the parameters [@fox_methodologies_2021].
+In both cases, it appeared that these methods can give results similar to other traditional bootstrapping approaches in much less time, and are therefore potentially worth further investigation.
+However, implementation of such methods across all the distributions now available in ssdtools would be a substantial undertaking.
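+
+To illustrate the general idea of the delta-method approach only (the work cited above used the Burr III and inverse Pareto distributions), the sketch below applies it to a lognormal SSD, where log(*HC5*) is a linear function of the estimated parameters. The lognormal choice and the use of `MASS::fitdistr()` are assumptions of this sketch and are not part of ssdtools.
+
+```{r delta-method-sketch}
+library(MASS)
+
+conc <- ssddata::ccme_silver$Conc
+fit_lnorm <- fitdistr(conc, "lognormal")
+
+p <- 0.05 # HC5
+z <- qnorm(p)
+log_hc5 <- unname(fit_lnorm$estimate["meanlog"] + z * fit_lnorm$estimate["sdlog"])
+
+# log(HC5) = meanlog + z * sdlog is linear in the parameters, so the
+# delta-method variance is grad' %*% vcov %*% grad with grad = (1, z)
+grad <- c(1, z)
+se_log <- drop(sqrt(t(grad) %*% fit_lnorm$vcov %*% grad))
+
+# Approximate 95% confidence interval for HC5
+exp(log_hc5 + c(-1.96, 1.96) * se_log)
+```
+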
-The revised version of ssdtools retains the computationally intensive bootstrapping method to obtain confidence intervals and an estimate of standard errors. We recommend a minimum bootstrap sample of 1,000 (the current default - see argument nboot in *?ssd_hc()*). However, more reliable results can be obtained using samples of 5,000 or 10,000. We recommend larger bootstrap samples for final reporting.
+The revised version of ssdtools retains the computationally intensive bootstrapping method to obtain confidence intervals and an estimate of standard errors.
+We recommend a minimum bootstrap sample of 1,000 (the current default - see argument nboot in *?ssd_hc()*).
+However, more reliable results can be obtained using samples of 5,000 or 10,000.
+We recommend larger bootstrap samples for final reporting.
 
 ## Parametric versus non-parametric bootstrapping
 
-Burrlioz 2.0 uses a non-parametric bootstrap method to obtain confidence intervals on the *HCx*
-estimate. Non-parametric bootstrapping is carried out by repeatedly resampling the raw data with
-replacement, and refitting the distribution many times. The 95% confidence limits are then obtained
-by calculating the lower 0.025th and upper 0.975th quantiles of the resulting *HCx* estimates across all56
-the bootstrap samples (typically >1000). This type of bootstrap takes into account uncertainty in the
-distribution fit based on uncertainty in the data.
-
-The ssdtools package by default uses a parametric bootstrap. Instead of resampling the data,
-parametric bootstrapping draws a random a set of new data (of the same sample size as the original)
-from the fitted distribution to repeatedly refit the distribution. Upper and lower 95% bounds are again
-calculated as the lower 0.025th and upper 0.975th quantiles of the resulting *HCx* estimates across all the
-bootstrap samples (again, typically >1000). This will capture the possible uncertainty that may occur
-for a sample size from a given distribution, but it assumes no uncertainty in that original fit, so it is not
+Burrlioz 2.0 uses a non-parametric bootstrap method to obtain confidence intervals on the *HCx* estimate.
+Non-parametric bootstrapping is carried out by repeatedly resampling the raw data with replacement, and refitting the distribution many times.
+The 95% confidence limits are then obtained by calculating the lower 0.025th and upper 0.975th quantiles of the resulting *HCx* estimates across all the bootstrap samples (typically >1000).
+This type of bootstrap takes into account uncertainty in the distribution fit based on uncertainty in the data.
+
+The ssdtools package by default uses a parametric bootstrap.
+Instead of resampling the data, parametric bootstrapping draws a random set of new data (of the same sample size as the original) from the fitted distribution to repeatedly refit the distribution.
+Upper and lower 95% bounds are again calculated as the lower 0.025th and upper 0.975th quantiles of the resulting *HCx* estimates across all the bootstrap samples (again, typically >1000).
+This will capture the possible uncertainty that may occur for a sample size from a given distribution, but it assumes no uncertainty in that original fit, so it is not
 accounting for uncertainty in the input data.
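+
+The difference between the two approaches can be illustrated with a minimal base-R sketch for a single lognormal fit. This is an illustration only: the lognormal choice and the use of `MASS::fitdistr()` are assumptions of the sketch, and ssdtools performs the equivalent resampling internally when `ssd_hc()` is called with `ci = TRUE`.
+
+```{r boot-contrast-sketch}
+library(MASS)
+
+conc <- ssddata::ccme_silver$Conc
+n <- length(conc)
+
+# HC5 from a lognormal fit to a vector of concentrations
+hc5_lnorm <- function(x) {
+  f <- fitdistr(x, "lognormal")
+  qlnorm(0.05, f$estimate["meanlog"], f$estimate["sdlog"])
+}
+
+fit_lnorm <- fitdistr(conc, "lognormal")
+
+set.seed(99)
+# Non-parametric: resample the observed concentrations with replacement
+hc5_np <- replicate(1000, hc5_lnorm(sample(conc, n, replace = TRUE)))
+# Parametric: simulate new data sets from the fitted distribution
+hc5_pb <- replicate(1000, hc5_lnorm(rlnorm(n, fit_lnorm$estimate["meanlog"], fit_lnorm$estimate["sdlog"])))
+
+quantile(hc5_np, c(0.025, 0.975))
+quantile(hc5_pb, c(0.025, 0.975))
+```
+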
-The new TMB version of ssdtools has the capacity to do bootstrapping either using the Burrlioz
-non-parametric method, or the original parametric method of ssdtools (based on
+The new TMB version of ssdtools has the capacity to do bootstrapping either using the Burrlioz non-parametric method, or the original parametric method of ssdtools (based on
 fitdistrplus [@fitdistrplus]).
 
-Using simulation studies the ssdtools team examined bias and compared the resulting coverage
-of the parametric and non-parametric bootstrapping methods [@fox_methodologies_2021]. They found that coverage was better using the parametric bootstrapping method, and this has been retained as the default bootstrapping method in the update to ssdtools.
+Using simulation studies, the ssdtools team examined bias and compared the resulting coverage of the parametric and non-parametric bootstrapping methods [@fox_methodologies_2021].
+They found that coverage was better using the parametric bootstrapping method, and this has been retained as the default bootstrapping method in the update to ssdtools.
 
 ## Bootstrapping model-averaged SSDs
 
-Bootstrapping to obtain confidence intervals for individual fitted distributions is relatively straightforward. However, obtaining bootstrap confidence intervals for model-averaged SSDs requires careful consideration, as the procedure is subject to the same pitfalls evident when obtaining model-averaged *HCx* estimates. The [Model Average SSDs](https://poissonconsulting.github.io/ssdtools/articles/A_model_averaging.html) vignette contains a detailed explanation of the fallacy of using the summed weighting of individual *HCx* values (as weighted arithmetic average), and how this can lead to spurious results. Model-averaged estimates and/or confidence intervals (including standard error) can be calculated by treating the distributions as constituting a single mixture distribution versus 'taking the mean'. When calculating the model-averaged estimates treating the distributions as constituting a single mixture distribution ensures that *ssd_hc()* is the inverse of *ssd_hp()*, and this applies for model-averaged confidence intervals.
+Bootstrapping to obtain confidence intervals for individual fitted distributions is relatively straightforward.
+However, obtaining bootstrap confidence intervals for model-averaged SSDs requires careful consideration, as the procedure is subject to the same pitfalls evident when obtaining model-averaged *HCx* estimates.
+The [Model Average SSDs](/model_averaging.html) vignette contains a detailed explanation of the fallacy of using the summed weighting of individual *HCx* values (as a weighted arithmetic average), and how this can lead to spurious results.
+Model-averaged estimates and/or confidence intervals (including standard error) can be calculated either by treating the distributions as constituting a single mixture distribution, or by 'taking the mean'.
+When calculating model-averaged estimates, treating the distributions as constituting a single mixture distribution ensures that *ssd_hc()* is the inverse of *ssd_hp()*, and the same applies to model-averaged confidence intervals.
 
-The revised version of ssdtools supports three weighting methods for obtaining bootstrap confidence intervals and an estimate of the standard error, and these are discussed in detail below.
+The revised version of ssdtools supports four methods for obtaining bootstrap confidence intervals, and these are discussed in detail below.
 ### Weighted arithmetic mean
 
-The early versions of ssdtools provided model-averaged confidence intervals (cis) and standard errors (se) that were calculated as weighted arithmetic means of the upper and lower cis and se values obtained via bootstrap simulation from each of the individual candidate distributions independently. This method is incorrect and may lead to spurious results (as described above) and has been shown via simulations studies to result in confidence intervals with very low coverage. The current version of ssdtools retains the functionality to reproduce the original behavior of ssdtools.
+The early versions of ssdtools provided model-averaged confidence intervals (cis) and standard errors (se) that were calculated as weighted arithmetic means of the upper and lower cis and se values obtained via bootstrap simulation from each of the individual candidate distributions independently.
+This method is incorrect and may lead to spurious results (as described above) and has been shown via simulation studies to result in confidence intervals with very low coverage.
+The current version of ssdtools retains the functionality to reproduce the original behavior of ssdtools.
 
-```{r hc1, eval=FALSE}
+```{r hc1}
 fit <- ssd_fit_dists(data = ssddata::ccme_silver)
-set.seed = 99
+set.seed(99)
 
-# Using the original ssdtools weighted arithmetic mean
-hc1 <- ssd_hc(fit, ci = TRUE, multi_est = FALSE, multi_ci = FALSE, weighted = FALSE)
+ssd_hc(fit, ci = TRUE, multi_est = FALSE, ci_method = "weighted_arithmetic")
 ```
-```{r}
-hc1
-```
-Use of this method for obtaining ci and se values is not recommended and only retained for legacy comparison purposes. It is both technically incorrect, and computationally inefficient.
+Use of this method for obtaining ci and se values is not recommended and is only retained for legacy comparison purposes.
+It is both technically incorrect and computationally inefficient.
 
 ### Weighted mixture distribution
 
-A more theoretically correct way of obtaining ci and se values is to consider the model average set as a mixture distribution (see above, and the [Model Average SSDs](https://poissonconsulting.github.io/ssdtools/articles/A_model_averaging.html) vignette). When we consider the model set as a mixture distribution, bootstrapping is achieved by resampling from the model set according to the AICc based model weights. A method for sampling from mixture distributions has been implemented in ssdtools, via the function *ssd_rmulti()*, which will generate random samples from a mixture of any combination of distributions currently implemented in `ssdtools`. Setting "multi_ci = TRUE" in the *ssd_hc()* call will ensure that bootstrap samples are drawn from a mixture distribution, instead of individual candidate distributions.
-
-When bootstrapping from the mixture distribution, a question arises whether the model weights should be re-estimated for every bootstrap sample, or fixed at the values estimated from the models fitted to the original sample of toxicity data? This is an interesting question that may warrant further investigation, however our current view is that they should be fixed at their nominal values in the same way that the component distributions to be used in bootstrapping are informed by the fit to the sample toxicity data. Using simulation studies we explored the coverage and bias of ci values obtained without and without fixing the distribution weights, and results indicate little difference.
-
-If treating the distributions as a single mixture distribution when calculating model average confidence intervals (i.e. with "multi_ci = TRUE"), then setting "weighted = FALSE" specifies to use the original model weights. Setting "weighted = TRUE" will result in bootstrapping that will re-estimate weights for each bootstrap sample.
-
-The following code can be used to obtain confidence intervals for *HCx* estimates via bootstrapping from the weighted mixture distribution (using *ssd_rmutli()*), with and without fixed weight values respectively.
+A more theoretically correct way of obtaining ci and se values is to consider the model average set as a mixture distribution (see above, and the [Model Average SSDs](model_averaging.html) vignette).
+When we consider the model set as a mixture distribution, bootstrapping is achieved by resampling from the model set according to the AICc based model weights.
+A method for sampling from mixture distributions has been implemented in ssdtools, via the function *ssd_rmulti()*, which will generate random samples from a mixture of any combination of distributions currently implemented in `ssdtools`.
+When bootstrapping from the mixture distribution, a question arises: should the model weights be re-estimated for every bootstrap sample, or fixed at the values estimated from the models fitted to the original sample of toxicity data?
+This is an interesting question that may warrant further investigation; however, our current view is that they should be fixed at their nominal values, in the same way that the component distributions to be used in bootstrapping are informed by the fit to the sample toxicity data.
+Using simulation studies, we explored the coverage and bias of ci values obtained with and without fixing the distribution weights, and the results indicate little difference.
 
-```{r hc2, eval=FALSE}
-# Using the rmulti boostrapping method with fixed weights
-hc2 <- ssd_hc(fit, ci = TRUE, multi_est = TRUE, multi_ci = TRUE, weighted = FALSE)
-```
+The following code can be used to obtain confidence intervals for *HCx* estimates via bootstrapping from the weighted mixture distribution (using *ssd_rmulti()*), with and without fixed weight values respectively.
 
-```{r}
-hc2
-```
-```{r hc3, eval=FALSE}
-# Using the rmulti boostrapping method with fixed weights
-hc3 <- ssd_hc(fit, ci = TRUE, multi_est = TRUE, multi_ci = TRUE, weighted = TRUE)
+```{r hc2}
+# Using the multi bootstrapping method with fixed weights
+ssd_hc(fit, ci = TRUE, ci_method = "multi_fixed")
 ```
-```{r}
-hc3
+```{r hc3}
+# Using the multi bootstrapping method without fixed weights
+ssd_hc(fit, ci = TRUE, ci_method = "multi_free")
 ```
 
 Use of this method (with or without fixed weights) is theoretically correct, but is computationally very inefficient.
 
 ### Weighted bootstrap sample
 
-The developers of `ssdtools` investigated a third method for obtaining confidence intervals for the model-averaged SSD. This method bootstraps from each of the distributions individually, taking a weighted sample from each, and then combining these into a pooled bootstrap sample for estimation of te ci and se values. Psuedo code for this method is as follows:
+The developers of `ssdtools` investigated a third method for obtaining confidence intervals for the model-averaged SSD.
+This method bootstraps from each of the distributions individually, taking a weighted sample from each, and then combining these into a pooled bootstrap sample for estimation of the ci and se values.
+Pseudo code for this method is as follows (a minimal base-R sketch of the same procedure is given at the end of this section):
 
- * For each distribution in the `fitdists` object, the proportional number of bootstrap samples to draw (`nboot_vals`) is found using `round(nboot * weight)`, where `nboot` is the total number of bootstrap samples and weight is the AICc based model weights for each distribution based on the original `ssd_fitdist` fit.
+- For each distribution in the `fitdists` object, the proportional number of bootstrap samples to draw (`nboot_vals`) is found using `round(nboot * weight)`, where `nboot` is the total number of bootstrap samples and `weight` is the AICc-based model weight for each distribution based on the original `ssd_fit_dists` fit.
 
-* For each of the `nboot_vals` for each distribution, a random sample of size N is drawn (the total number of original data points included in the original SSD fit) based on the estimated parameters from the original data for that distribution.
+- For each of the `nboot_vals` for each distribution, a random sample of size N is drawn (the total number of original data points included in the original SSD fit) based on the estimated parameters from the original data for that distribution.
 
-* The random sample is re-fitting using that distribution.
+- The random sample is refitted using that distribution.
 
-* *HCx* is estimated from the re-fitted bootstrap fit.
+- *HCx* is estimated from the refitted bootstrap fit.
 
-* The *HCx* estimates for all `nboot_vals` for each distribution are then pooled across all distributions, and *quantile()* is used to determine the lower and upper confidence bounds for this pooled weighted bootstrap sample of *HCx* values.
+- The *HCx* estimates for all `nboot_vals` for each distribution are then pooled across all distributions, and *quantile()* is used to determine the lower and upper confidence bounds for this pooled weighted bootstrap sample of *HCx* values.
 
-This method does not draw random samples from the mixture distribution using *ssd_rmulti* (thus "multi_ci = FALSE"). While mathematically the method shares some properties with obtaining *HCx* estimates via summing the weighted values (weighted arithmetic mean), simulation studies have shown that, as a method for obtaining confidence intervals, this pooled weighted sample method yields similar ci values and coverage the *ssd_rmulti()* method, and is computationally much faster.
+This method does not draw random samples from the mixture distribution using *ssd_rmulti()*.
+While mathematically the method shares some properties with obtaining *HCx* estimates via summing the weighted values (the weighted arithmetic mean), simulation studies have shown that, as a method for obtaining confidence intervals, this pooled weighted sample method yields similar ci values and coverage to the *ssd_rmulti()* method, and is computationally much faster.
 
-This method is currently the default method in ssdtools, and can be implemented by setting "multi_ci = FALSE" and "weighted = TRUE" in the ssd_hc() call.
+This method is currently the default in ssdtools, and can be implemented by simply calling `ssd_hc()`.
 
-```{r hc4, eval=FALSE}
+```{r hc4}
 # Using a weighted pooled bootstrap sample
-hc4 <- ssd_hc(fit, ci = TRUE, multi_est = FALSE, multi_ci = FALSE, weighted = TRUE)
+ssd_hc(fit, ci = TRUE)
 ```
-```{r}
-hc4
-```
-
-Here, the argument "weighted = TRUE" specifies to take bootstrap samples from each distribution proportional to its weight (so that they sum to nboot).
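+
+The sketch below illustrates the pooled weighted bootstrap procedure described in the pseudo code, using base R and two hypothetical candidate distributions (lognormal and Weibull) with assumed weights of 0.6 and 0.4. It is an illustration only: the weights, the two-distribution candidate set, and the use of `MASS::fitdistr()` are assumptions of the sketch, not ssdtools' internal implementation.
+
+```{r pooled-bootstrap-sketch}
+library(MASS)
+
+conc <- ssddata::ccme_silver$Conc
+n <- length(conc)
+nboot <- 1000
+
+# Assumed AICc-based model weights for two candidate distributions
+weights <- c(lnorm = 0.6, weibull = 0.4)
+nboot_vals <- round(nboot * weights)
+
+# Fits to the original data (one per candidate distribution)
+fit_lnorm <- fitdistr(conc, "lognormal")
+fit_weibull <- fitdistr(conc, "weibull")
+
+set.seed(99)
+# Parametric bootstrap HC5 values from the lognormal candidate
+boot_lnorm <- replicate(nboot_vals["lnorm"], {
+  sim <- rlnorm(n, fit_lnorm$estimate["meanlog"], fit_lnorm$estimate["sdlog"])
+  refit <- fitdistr(sim, "lognormal")
+  qlnorm(0.05, refit$estimate["meanlog"], refit$estimate["sdlog"])
+})
+# Parametric bootstrap HC5 values from the Weibull candidate
+boot_weibull <- replicate(nboot_vals["weibull"], {
+  sim <- rweibull(n, fit_weibull$estimate["shape"], fit_weibull$estimate["scale"])
+  refit <- fitdistr(sim, "weibull")
+  qweibull(0.05, refit$estimate["shape"], refit$estimate["scale"])
+})
+
+# Pool the weighted bootstrap sample and take the 95% confidence limits
+quantile(c(boot_lnorm, boot_weibull), c(0.025, 0.975))
+```
+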
 ## Comparing bootstrapping methods
 
-We have undertaken extensive simulation studies comparing the implemented methods, and the results of these are reported elsewhere. For illustrative purposes, here we compare upper and lower confidence intervals using only a single example data set, the Silver data set from the Canadian Council of Ministers of the Environment (ccme).
+We have undertaken extensive simulation studies comparing the implemented methods, and the results of these are reported elsewhere.
+For illustrative purposes, here we compare upper and lower confidence intervals using only a single example data set, the Silver data set from the Canadian Council of Ministers of the Environment (ccme).
 
-Using the default settings for ssdtools, we compared the upper and lower confidence intervals for the four bootstrapping methods described above. Estimate upper confidence limits are relatively similar among the four methods. However, the lower confidence interval obtained using the weighted arithmetic mean (the method implemented in earlier versions of ssdtools) is much higher than the other three methods, potentially accounting for the relatively poor coverage of this method in our simulation studies.
+Using the default settings for ssdtools, we compared the upper and lower confidence intervals for the four bootstrapping methods described above.
+Estimated upper confidence limits are relatively similar among the four methods.
+However, the lower confidence interval obtained using the weighted arithmetic mean (the method implemented in earlier versions of ssdtools) is much higher than that of the other three methods, potentially accounting for the relatively poor coverage of this method in our simulation studies.
 
 ```{r fig.width=7,fig.height=5}
 library(ggplot2)
@@ -165,8 +157,8 @@ ggarrange(p1, p2,common.legend = TRUE)
 
 Given the similarity of upper and lower confidence intervals of the weighted bootstrap sample method compared to the potentially more theoretically correct, but computationally more intensive weighted mixture method (via *ssd_rmulti()*), we also compared the time taken to undertake bootstrapping across the methods.
 
-Using the default 1,000 bootstrap samples, the elapsed time to undertake bootstrapping for the mixture method was `r t2["elapsed"]` seconds, compared to `r t4["elapsed"]` seconds for the weighted bootstrap sample. This means that the weighted bootstrap method is ~ `r round(t2["elapsed"]/t4["elapsed"])` times faster, representing a considerable computational saving across many SSDs. For this reason, this method is currently set as the default method for confidence interval estimation in ssdtools.
-
+Using the default 1,000 bootstrap samples, the elapsed time to undertake bootstrapping for the mixture method was `r t2["elapsed"]` seconds, compared to `r t4["elapsed"]` seconds for the weighted bootstrap sample.
+This means that the weighted bootstrap method is ~ `r round(t2["elapsed"]/t4["elapsed"])` times faster, representing a considerable computational saving across many SSDs.
+For this reason, this method is currently set as the default method for confidence interval estimation in ssdtools.
 ```{r fig.width=7,fig.height=5}
 p3 <- ggplot(compare_dat, aes(method, time, fill = method)) +
diff --git a/vignettes/faqs.Rmd b/vignettes/faqs.Rmd
new file mode 100644
index 00000000..db57af80
--- /dev/null
+++ b/vignettes/faqs.Rmd
@@ -0,0 +1,66 @@
+---
+title: "Frequently Asked Questions"
+author: "ssdtools Team"
+date: "`r Sys.Date()`"
+bibliography: references.bib
+csl: my-style.csl
+latex_engine: MathJax
+mainfont: Arial
+mathfont: Courier
+output: rmarkdown::html_vignette
+#output: rmarkdown::pdf_document
+vignette: >
+  %\VignetteIndexEntry{Frequently Asked Questions}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+---
+
+```{r, include = FALSE}
+knitr::opts_chunk$set(
+  collapse = TRUE,
+  comment = "#>"
+)
+```
+
+
+## How can I plot the model-averaged fit with individual fits?
+
+```{r, fig.width = 5, fig.height = 5}
+library(ssdtools)
+
+dist <- ssd_fit_dists(ssddata::ccme_boron)
+ssd_plot_cdf(dist, average = NA)
+```
+
+## How do I fit distributions to multiple groups such as taxa and/or chemicals?
+
+An elegant approach using some tidyverse packages is demonstrated below.
+
+```{r, message=FALSE}
+library(ssddata)
+library(ssdtools)
+library(ggplot2)
+library(dplyr)
+library(tidyr)
+library(purrr)
+
+boron_preds <- nest(ccme_boron, data = c(Chemical, Species, Conc, Units)) %>%
+  mutate(
+    Fit = map(data, ssd_fit_dists, dists = "lnorm"),
+    Prediction = map(Fit, predict)
+  ) %>%
+  unnest(Prediction)
+```
+
+The resultant data and predictions can then be plotted as follows.
+```{r, fig.width = 5, fig.height = 5}
+ssd_plot(ccme_boron, boron_preds, xlab = "Concentration (mg/L)", ci = FALSE) +
+  facet_wrap(~Group)
+```
+
+```{r, results = "asis", echo = FALSE}
+cat(licensing_md())
+```