diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..4edd7b1a --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +relative_files = True diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index d14d1965..0981db81 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -43,16 +43,17 @@ Describe the steps to reproduce the behavior: - [ ] Select **scientist(s)** or **no scientist** required ### Labels ### +- [ ] Review default **alert** labels - [ ] Select **component(s)** - [ ] Select **priority** - [ ] Select **requestor(s)** -### Projects and Milestone ### -- [ ] Select **Organization** level **Project** for support of the current coordinated release -- [ ] Select **Repository** level **Project** for development toward the next official release or add **alert: NEED CYCLE ASSIGNMENT** label +### Milestone and Projects### - [ ] Select **Milestone** as the next bugfix version +- [ ] Select **Coordinated METplus-X.Y Support** project for support of the current coordinated release +- [ ] Select **METplotpy-X.Y.Z Development** project for development toward the next official release -## Define Related Issue(s) ## +## Define Related Issue(s)## Consider the impact to the other METplus components. - [ ] [METplus](https://github.com/dtcenter/METplus/issues/new/choose), [MET](https://github.com/dtcenter/MET/issues/new/choose), [METdataio](https://github.com/dtcenter/METdataio/issues/new/choose), [METviewer](https://github.com/dtcenter/METviewer/issues/new/choose), [METexpress](https://github.com/dtcenter/METexpress/issues/new/choose), [METcalcpy](https://github.com/dtcenter/METcalcpy/issues/new/choose), [METplotpy](https://github.com/dtcenter/METplotpy/issues/new/choose) @@ -65,20 +66,20 @@ Branch name: `bugfix__main__` - [ ] Add/update log messages for easier debugging. - [ ] Add/update unit tests. - [ ] Add/update documentation. -- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/overview.html#metplus-components-python-requirements) table. +- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. - [ ] Push local changes to GitHub. - [ ] Submit a pull request to merge into **main_\**. Pull request: `bugfix main_ ` - [ ] Define the pull request metadata, as permissions allow. -Select: **Reviewer(s)** and **Development** issues -Select: **Organization** level software support **Project** for the current coordinated release +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the next bugfix version +Select: Coordinated METplus-X.Y Support project for support of the current coordinated release - [ ] Iterate until the reviewer(s) accept and merge your changes. - [ ] Delete your fork or branch. - [ ] Complete the steps above to fix the bug on the **develop** branch. Branch name: `bugfix__develop_` Pull request: `bugfix develop ` -Select: **Reviewer(s)** and **Development** issues -Select: **Repository** level development cycle **Project** for the next official release +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the next official version +Select: **METplotpy-X.Y.Z Development** project for development toward the next official release - [ ] Close this issue. 
diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.md b/.github/ISSUE_TEMPLATE/enhancement_request.md index c6f4f973..ac162de1 100644 --- a/.github/ISSUE_TEMPLATE/enhancement_request.md +++ b/.github/ISSUE_TEMPLATE/enhancement_request.md @@ -35,13 +35,14 @@ Consider breaking the enhancement down into sub-issues. - [ ] Select **scientist(s)** or **no scientist** required ### Labels ### +- [ ] Review default **alert** labels - [ ] Select **component(s)** - [ ] Select **priority** - [ ] Select **requestor(s)** -### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label -- [ ] Select **Milestone** as the next official version or **Future Versions** +### Milestone and Projects ### +- [ ] Select **Milestone** as a **METplotpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **METplotpy-X.Y.Z** version, select the **METplotpy-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. @@ -56,14 +57,14 @@ Branch name: `feature__` - [ ] Add/update log messages for easier debugging. - [ ] Add/update unit tests. - [ ] Add/update documentation. -- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/overview.html#metplus-components-python-requirements) table. +- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. - [ ] Push local changes to GitHub. - [ ] Submit a pull request to merge into **develop**. Pull request: `feature ` - [ ] Define the pull request metadata, as permissions allow. -Select: **Reviewer(s)** and **Development** issues -Select: **Repository** level development cycle **Project** for the next official release +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the next official version +Select: **METplotpy-X.Y.Z Development** project for development toward the next official release - [ ] Iterate until the reviewer(s) accept and merge your changes. - [ ] Delete your fork or branch. - [ ] Close this issue. diff --git a/.github/ISSUE_TEMPLATE/new_feature_request.md b/.github/ISSUE_TEMPLATE/new_feature_request.md index 45bf64b9..40d83f7d 100644 --- a/.github/ISSUE_TEMPLATE/new_feature_request.md +++ b/.github/ISSUE_TEMPLATE/new_feature_request.md @@ -38,13 +38,14 @@ Consider breaking the new feature down into sub-issues. - [ ] Select **scientist(s)** or **no scientist** required ### Labels ### +- [ ] Review default **alert** labels - [ ] Select **component(s)** - [ ] Select **priority** - [ ] Select **requestor(s)** -### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label -- [ ] Select **Milestone** as the next official version or **Future Versions** +### Milestone and Projects### +- [ ] Select **Milestone** as a **METplotpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **METplotpy-X.Y.Z** version, select the **METplotpy-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. @@ -59,14 +60,14 @@ Branch name: `feature__` - [ ] Add/update log messages for easier debugging. - [ ] Add/update unit tests. - [ ] Add/update documentation. 
-- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/overview.html#metplus-components-python-requirements) table. +- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. - [ ] Push local changes to GitHub. - [ ] Submit a pull request to merge into **develop**. Pull request: `feature ` - [ ] Define the pull request metadata, as permissions allow. -Select: **Reviewer(s)** and **Development** issues -Select: **Repository** level development cycle **Project** for the next official release +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the next official version +Select: **METplotpy-X.Y.Z Development** project for development toward the next official release - [ ] Iterate until the reviewer(s) accept and merge your changes. - [ ] Delete your fork or branch. - [ ] Close this issue. diff --git a/.github/ISSUE_TEMPLATE/sub-issue.md b/.github/ISSUE_TEMPLATE/sub-issue.md index 981cb1bc..d2e6da4d 100644 --- a/.github/ISSUE_TEMPLATE/sub-issue.md +++ b/.github/ISSUE_TEMPLATE/sub-issue.md @@ -24,10 +24,13 @@ This is a sub-issue of #*List the parent issue number here*. - [ ] Select **scientist(s)** or **no scientist** required ### Labels ### +- [ ] Review default **alert** labels - [ ] Select **component(s)** - [ ] Select **priority** - [ ] Select **requestor(s)** -### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label -- [ ] Select **Milestone** as the next official version or **Future Versions** +### Milestone and Projects ### +- [ ] Select **Milestone** as a **METplotpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **METplotpy-X.Y.Z** version, select the **METplotpy-X.Y.Z Development** project + + diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md index b80186c6..c015c1d5 100644 --- a/.github/ISSUE_TEMPLATE/task.md +++ b/.github/ISSUE_TEMPLATE/task.md @@ -34,13 +34,14 @@ Consider breaking the task down into sub-issues. - [ ] Select **scientist(s)** or **no scientist** required ### Labels ### +- [ ] Review default **alert** labels - [ ] Select **component(s)** - [ ] Select **priority** - [ ] Select **requestor(s)** -### Projects and Milestone ### -- [ ] Select **Repository** and/or **Organization** level **Project(s)** or add **alert: NEED CYCLE ASSIGNMENT** label -- [ ] Select **Milestone** as the next official version or **Future Versions** +### Milestone and Projects ### +- [ ] Select **Milestone** as a **METplotpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** +- [ ] For a **METplotpy-X.Y.Z** version, select the **METplotpy-X.Y.Z Development** project ## Define Related Issue(s) ## Consider the impact to the other METplus components. @@ -55,14 +56,14 @@ Branch name: `feature__` - [ ] Add/update log messages for easier debugging. - [ ] Add/update unit tests. - [ ] Add/update documentation. -- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/overview.html#metplus-components-python-requirements) table. 
+- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. - [ ] Push local changes to GitHub. - [ ] Submit a pull request to merge into **develop**. Pull request: `feature ` - [ ] Define the pull request metadata, as permissions allow. -Select: **Reviewer(s)** and **Development** issues -Select: **Repository** level development cycle **Project** for the next official release +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the next official version +Select: **METplotpy-X.Y.Z Development** project for development toward the next official release - [ ] Iterate until the reviewer(s) accept and merge your changes. - [ ] Delete your fork or branch. - [ ] Close this issue. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 342abeb9..fe071422 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -18,14 +18,14 @@ If **yes**, please describe: ## Pull Request Checklist ## See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. -- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/overview.html#metplus-components-python-requirements) table. +- [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. - [ ] Review the source issue metadata (required labels, projects, and milestone). - [ ] Complete the PR definition above. - [ ] Ensure the PR title matches the feature or bugfix branch name. - [ ] Define the PR metadata, as permissions allow. -Select: **Reviewer(s)** -Select: **Organization** level software support **Project** or **Repository** level development cycle **Project** +Select: **Reviewer(s)** and **Development** issue Select: **Milestone** as the version that will include these changes +Select: **Coordinated METplus-X.Y Support** project for bugfix releases or **METplotpy-X.Y.Z Development** project for official releases - [ ] After submitting the PR, select the :gear: icon in the **Development** section of the right hand sidebar. Search for the issue that this PR will close and select it, if it is not already selected. - [ ] After the PR is approved, merge your changes. If permissions do not allow this, request that the reviewer do the merge. - [ ] Close the linked issue and delete your feature or bugfix branch from GitHub. 
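A note on the coverage plumbing introduced in this changeset (the new .coveragerc above, the pytest/coverage steps added to sonarqube.yaml below, and sonar.python.coverage.reportPaths in sonar-project.properties): relative_files = True makes coverage.py record repository-relative paths in coverage.xml, which is what lets SonarQube map the report onto sonar.sources. The sketch below is a minimal local equivalent of those workflow steps; invoking pytest through pytest.main() and the "test" directory argument are assumptions for illustration, while the CI jobs use the command-line form (coverage run -m pytest, coverage report -m, coverage xml).

.. code-block:: python

    # Minimal local sketch of the coverage flow wired into CI by this changeset.
    # Assumes the coverage and pytest packages are installed and tests live under test/.
    import coverage
    import pytest

    cov = coverage.Coverage()               # reads .coveragerc, including relative_files = True
    cov.start()
    exit_code = pytest.main(["test"])       # run the test suite (hypothetical invocation)
    cov.stop()
    cov.save()
    cov.report(show_missing=True)           # same summary as "coverage report -m"
    cov.xml_report(outfile="coverage.xml")  # report consumed by the SonarQube scan
    print("pytest exit code:", exit_code)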
diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yaml similarity index 100% rename from .github/workflows/documentation.yml rename to .github/workflows/documentation.yaml diff --git a/.github/workflows/release-checksum.yml b/.github/workflows/release-checksum.yaml similarity index 100% rename from .github/workflows/release-checksum.yml rename to .github/workflows/release-checksum.yaml diff --git a/.github/workflows/sonarqube.yml b/.github/workflows/sonarqube.yaml similarity index 70% rename from .github/workflows/sonarqube.yml rename to .github/workflows/sonarqube.yaml index d48e1237..cb809cbe 100644 --- a/.github/workflows/sonarqube.yml +++ b/.github/workflows/sonarqube.yaml @@ -48,6 +48,36 @@ jobs: # Disable shallow clones for better analysis fetch-depth: 0 + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Retrieve METcalcpy repository develop branch + run: | + metcalcpy_dir="$RUNNER_WORKSPACE/METcalcpy" + git clone https://github.com/dtcenter/METcalcpy ${metcalcpy_dir} + cd ${metcalcpy_dir} + git -C ${metcalcpy_dir} checkout develop + python -m pip install -e ${metcalcpy_dir} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + python -m pip install pytest-cov + + - name: Run Pytests + run: coverage run -m pytest + + - name: Output coverage report + run: coverage report -m + if: always() + + - name: Generate XML coverage report + run: coverage xml + if: always() + - name: Get branch name id: get_branch_name run: echo branch_name=${GITHUB_REF#refs/heads/} >> $GITHUB_OUTPUT diff --git a/.github/workflows/trigger_metplus.yml b/.github/workflows/trigger_metplus.yaml similarity index 98% rename from .github/workflows/trigger_metplus.yml rename to .github/workflows/trigger_metplus.yaml index bd70bc1a..85e6663e 100644 --- a/.github/workflows/trigger_metplus.yml +++ b/.github/workflows/trigger_metplus.yaml @@ -3,7 +3,7 @@ name: Trigger METplus Workflow on: push: branches: - - develop + - main paths-ignore: - 'docs/**' - '.github/pull_request_template.md' diff --git a/.github/workflows/unit_tests.yaml b/.github/workflows/unit_tests.yaml index a3c07bc5..27d578c2 100644 --- a/.github/workflows/unit_tests.yaml +++ b/.github/workflows/unit_tests.yaml @@ -39,10 +39,11 @@ jobs: - name: Retrieve METcalcpy repository develop branch run: | - /usr/bin/git clone https://github.com/dtcenter/METcalcpy - cd METcalcpy - /usr/bin/git checkout develop - python -m pip install -e . 
+ metcalcpy_dir="$RUNNER_WORKSPACE/METcalcpy" + git clone https://github.com/dtcenter/METcalcpy ${metcalcpy_dir} + cd ${metcalcpy_dir} + git -C ${metcalcpy_dir} checkout develop + python -m pip install -e ${metcalcpy_dir} - name: Install dependencies run: | @@ -55,43 +56,8 @@ jobs: # run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" # id: extract_branch - - name: Test with pytest - run: | - cd test - cd bar - pytest test_bar.py - cd ../box - pytest test_box.py - cd ../contour - pytest test_contour.py - cd ../eclv - pytest test_eclv.py - cd ../ens_ss - pytest test_ens_ss.py - cd ../equivalence_testing_bounds - pytest test_equivalence_testing_bounds.py - cd ../line - pytest test_line_groups_plot.py - pytest test_line_plot.py - cd ../mpr_plot - pytest test_mpr_plot.py - cd ../performance_diagram - pytest test_performance_diagram.py - cd ../reliability_diagram - pytest test_reliability_diagram.py - cd ../roc_diagram - pytest test_roc_diagram.py - cd ../taylor_diagram - pytest test_taylor_diagram.py - cd ../wind_rose - pytest test_wind_rose.py - cd ../histogram - pytest test_prob_hist.py - pytest test_rank_hist.py - pytest test_rel_hist.py - cd ../tcmpr_plots - pytest --capture=fd test_tcmpr_plots.py + run: pytest diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 00000000..abf271a3 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,9 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml +/sonarlint* diff --git a/.idea/METplotpy.iml b/.idea/METplotpy.iml new file mode 100644 index 00000000..2d40d024 --- /dev/null +++ b/.idea/METplotpy.iml @@ -0,0 +1,15 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 00000000..1bbf15dd --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,14 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 00000000..105ce2da --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 00000000..c58198b3 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 00000000..8f21a77e --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 00000000..35eb1ddf --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/docs/Users_Guide/figure/tmp.vert_profile.MID_CONUS.png b/docs/Users_Guide/figure/tmp.vert_profile.MID_CONUS.png index 7f138422..4d559a9e 100644 Binary files a/docs/Users_Guide/figure/tmp.vert_profile.MID_CONUS.png and b/docs/Users_Guide/figure/tmp.vert_profile.MID_CONUS.png differ diff --git a/docs/Users_Guide/figure/tmp.vert_profile.png b/docs/Users_Guide/figure/tmp.vert_profile.png index 4cd47657..f6fb7358 100644 Binary files a/docs/Users_Guide/figure/tmp.vert_profile.png and b/docs/Users_Guide/figure/tmp.vert_profile.png differ diff --git a/docs/Users_Guide/figure/tmp_32.0N-115.0E-34.0N-82.0E.png 
b/docs/Users_Guide/figure/tmp_32.0N-115.0E-34.0N-82.0E.png deleted file mode 100644 index 3d196e80..00000000 Binary files a/docs/Users_Guide/figure/tmp_32.0N-115.0E-34.0N-82.0E.png and /dev/null differ diff --git a/docs/Users_Guide/figure/tmp_500hPa.png b/docs/Users_Guide/figure/tmp_500hPa.png index 0cc05b58..d8be105c 100644 Binary files a/docs/Users_Guide/figure/tmp_500hPa.png and b/docs/Users_Guide/figure/tmp_500hPa.png differ diff --git a/docs/Users_Guide/figure/tmp_pbl.png b/docs/Users_Guide/figure/tmp_pbl.png index b8c98595..d02e6542 100644 Binary files a/docs/Users_Guide/figure/tmp_pbl.png and b/docs/Users_Guide/figure/tmp_pbl.png differ diff --git a/docs/Users_Guide/figure/ugrd_28.0N-120.0E-26.0N-75.0E.png b/docs/Users_Guide/figure/ugrd_28.0N-120.0E-26.0N-75.0E.png new file mode 100644 index 00000000..6ce7ca54 Binary files /dev/null and b/docs/Users_Guide/figure/ugrd_28.0N-120.0E-26.0N-75.0E.png differ diff --git a/docs/Users_Guide/fv3_physics.rst b/docs/Users_Guide/fv3_physics.rst index 19700858..b22f147f 100644 --- a/docs/Users_Guide/fv3_physics.rst +++ b/docs/Users_Guide/fv3_physics.rst @@ -9,9 +9,14 @@ and spatial domain. Tendencies are partitioned into physics parameterizations an dynamics. Physics parameterizations include schemes like deep convection, convective gravity wave drag, short wave radiation, planetary boundary layer, microphysics, and others listed below. Non-physics tendencies (or dynamics) are due to horizontal -and vertical motion. The residual (which should be zero) is the difference between +and vertical motion (advection). + +residual = all tendencies - actual tendency + +The residual (which should be close to zero) is the +difference between the actual change in the state variable over the requested time window and the -expected change due to physics parameterizations and dynamics tendencies. One can plot +combined change due to all physics parameterizations and dynamics tendencies. One can plot a single tendency component at multiple pressure levels or plot all tendency components at a single pressure level. Plan views (horizontal cross sections), vertical profiles, and difference plots are also available. @@ -19,7 +24,7 @@ and difference plots are also available. Required Packages: ================== -* cartopy (0.20.3 only) +* cartopy * matplotlib @@ -47,80 +52,81 @@ Save this file in a directory where you have read and write permissions, such as $WORKING_DIR/data/fv3_physics_tend, where $WORKING_DIR is the path to the directory where you will save input data. -For additional details see `grid description in UFS Short Range Weather App user manual `_ - - -Available Tendency Variables ----------------------------- - -A small description of each tendency variable and their nicknames are shown below. Some -tendencies do not apply to all four state variables, so these cells are blank. 
- -+----------------------------+-------------+-------------------+-------------+-------------+ -| tendency | temperature | specific humidity | u-wind | v-wind | -+============================+=============+===================+=============+=============+ -|convective gravity wave drag| congwd| | congwd | congwd | -+----------------------------+-------------+-------------------+-------------+-------------+ -| deep convection | deepcnv| deepcnv | deepcnv| deepcnv| -+----------------------------+-------------+-------------------+-------------+-------------+ -| long wave radiation | lw | | | | -+----------------------------+-------------+-------------------+-------------+-------------+ -| microphysics | mp | mp | mp | mp | -+----------------------------+-------------+-------------------+-------------+-------------+ -|orographic gravity wave drag| orogwd| | orogwd | orogwd | -+----------------------------+-------------+-------------------+-------------+-------------+ -| planetary boundary layer | pbl | pbl | pbl | pbl | -+----------------------------+-------------+-------------------+-------------+-------------+ -| Rayleigh damping | rdamp | | rdamp | rdamp | -+----------------------------+-------------+-------------------+-------------+-------------+ -| shallow convection | shalcnv| shalcnv | shalcnv| shalcnv| -+----------------------------+-------------+-------------------+-------------+-------------+ -| short wave radiation | sw | | | | -+----------------------------+-------------+-------------------+-------------+-------------+ -| total physics (all above) | phys | phys | phys | phys | -+----------------------------+-------------+-------------------+-------------+-------------+ -| dynamics | nophys| nophys | nophys| nophys| -+----------------------------+-------------+-------------------+-------------+-------------+ -| state variable at validtime| tmp | spfh | ugrd | vgrd | -+----------------------------+-------------+-------------------+-------------+-------------+ -| actual change in state var | dtmp | dspfh | dugrd | dvgrd | -+----------------------------+-------------+-------------------+-------------+-------------+ - - - - -The expected names of the netCDF variables in the history file are shown below. If your history -file is different, one can change them in YAML config file -*$METPLOTPY_BASE/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml* +For additional details see +`grid description in UFS Short Range Weather App user manual `_ + + +Default tendency variable names +------------------------------- + +Default tendency variable names are below. The tendencies that are available depend on the +physics suite that the user selects when running FV3; more specifically, its contents are +determined by the diag_table file that the user sets up. The history file that we +use our example is for a specific diag_table and so may change with different FV3 configurations. +The user must make sure the names in the configuration file +*$METPLOTPY_BASE/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml* +match the names used in fv3_history.nc for their case. +Some tendencies do not apply to all four state variables, so these cells are left blank. **NOTE**: *$METPLOTPY_BASE* is the directory where the METplotpy code is saved (e.g. */path/to/user/dir/METplotpy*). 
-+----------------------------+-------------+-------------------+-------------+-------------+ -| tendency | temperature | specific humidity | u-wind | v-wind | -+============================+=============+===================+=============+=============+ -|convective gravity wave drag| dt3dt_congwd| |du3dt_congwd |dv3dt_congwd | -+----------------------------+-------------+-------------------+-------------+-------------+ -| deep convection |dt3dt_deepcnv| dq3dt_deepcnv |du3dt_deepcnv|dv3dt_deepcnv| -+----------------------------+-------------+-------------------+-------------+-------------+ -| long wave radiation | dt3dt_lw | | | | -+----------------------------+-------------+-------------------+-------------+-------------+ -| microphysics | dt3dt_mp | dq3dt_mp | du3dt_mp | dv3dt_mp | -+----------------------------+-------------+-------------------+-------------+-------------+ -|orographic gravity wave drag| dt3dt_orogwd| |du3dt_orogwd |dv3dt_orogwd | -+----------------------------+-------------+-------------------+-------------+-------------+ -| planetary boundary layer | dt3dt_pbl | dq3dt_pbl | du3dt_pbl | dv3dt_pbl | -+----------------------------+-------------+-------------------+-------------+-------------+ -| Rayleigh damping | dt3dt_rdamp | | du3dt_rdamp | dv3dt_rdamp | -+----------------------------+-------------+-------------------+-------------+-------------+ -| shallow convection |dt3dt_shalcnv| dq3dt_shalcnv |du3dt_shalcnv|dv3dt_shalcnv| -+----------------------------+-------------+-------------------+-------------+-------------+ -| short wave radiation | dt3dt_sw | | | | -+----------------------------+-------------+-------------------+-------------+-------------+ -| total physics (all above) | dt3dt_phys | dq3dt_phys |du3dt_phys | dv3dt_phys | -+----------------------------+-------------+-------------------+-------------+-------------+ -| dynamics | dt3dt_nophys| dq3dt_nophys | du3dt_nophys| dv3dt_nophys| -+----------------------------+-------------+-------------------+-------------+-------------+ ++-----------------------------+-------------+-------------------+-------------+-------------+ +| State Variable | temperature | specific humidity | u-wind | v-wind | ++=============================+=============+===================+=============+=============+ +| expected name | tmp | spfh | ugrd | vgrd | ++-----------------------------+-------------+-------------------+-------------+-------------+ + +Tendency variables: + ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| Tendency Variable | temperature | specific humidity | u-wind | v-wind | ++=============================+===================+===================+================+================+ +| convective gravity wave drag| dtend_temp_cnvgwd | | dtend_u_cnvgwd | dtend_v_cnvgwd | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| deep convection | dtend_temp_deepcnv| dtend_qv_deepcnv | dtend_u_deepcnv| dtend_v_deepcnv| ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| long wave radiation | dtend_temp_lw | | | | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| microphysics | dtend_temp_mp | dtend_qv_mp | | | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| orographic gravity wave drag| dtend_temp_orogwd | | dtend_u_orogwd | 
dtend_v_orogwd | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| planetary boundary layer | dtend_temp_pbl | dtend_qv_pbl | dtend_u_pbl | dtend_v_pbl | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| Rayleigh damping | dtend_temp_rdamp | | dtend_u_rdamp | dtend_v_rdamp | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| shallow convection | dtend_temp_shalcnv| dtend_qv_shalcnv | dtend_u_shalcnv| dtend_v_shalcnv| ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| short wave radiation | dtend_temp_sw | | | | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| all physics tendencies | dtend_temp_phys | dtend_qv_phys | dtend_u_phys | dtend_v_phys | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| dynamics (advection) | dtend_temp_nophys | dtend_qv_nophys | dtend_u_nophys | dtend_v_nophys | ++-----------------------------+-------------------+-------------------+----------------+----------------+ + + +Derived tendency variables that show up in plots: + ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| Derived Variable | temperature | specific humidity | u-wind | v-wind | ++=============================+===================+===================+================+================+ +| all phys and nophys | all | all | all | all | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| actual tendency | actual | actual | actual | actual | ++-----------------------------+-------------------+-------------------+----------------+----------------+ +| residual tend. (all-actual) | resid | resid | resid | resid | ++-----------------------------+-------------------+-------------------+----------------+----------------+ + +If time window overlaps initialization time +------------------------------------------- + +The history file does not necessarily have the temperature, moisture, or wind at the exact +time of model initialization. It is usally the next timestep (e.g. 180 seconds later). +This means you cannot derive the actual change in temperature starting at the model initialization +time. You must choose a later valid time and/or a shorter time window that does not overlap +the initialization time. In other words, it is a problem if your model initialization time is 0z, your +valid time is 1z and your time window is one hour. Example ======= @@ -149,7 +155,8 @@ There is a YAML config file located in Run from the Command Line ========================= -To generate example tendency plots using settings in the **fv3_physics_defaults.yaml** configuration file, perform the following: +To generate example tendency plots using settings in the **fv3_physics_defaults.yaml** +configuration file, perform the following: .. 
code-block:: bash @@ -164,8 +171,9 @@ Plan View :: usage: planview_fv3.py [-h] [-d] [--method {nearest,linear,loglinear}] [--ncols NCOLS] - [--nofineprint] [-o OFILE] [-p PFULL [PFULL ...]] [-s SHP] - [--subtract SUBTRACT] [-t TWINDOW] [-v VALIDTIME] + [--nofineprint] [--norobust] [-o OFILE] [-p PFULL [PFULL ...]] + [-s SHP] [--subtract SUBTRACT] [-t TWINDOW] [-v VALIDTIME] + [--vmin VMIN] [--vmax VMAX] config historyfile gridfile statevariable fill Plan view of FV3 diagnostic tendency @@ -185,6 +193,8 @@ Plan View --ncols NCOLS number of columns (default: None) --nofineprint Don't add metadata and created by date (for comparing images) (default: False) + --norobust compute colormap range with extremes, not 2nd and 98th + percentiles (default: False) -o OFILE, --ofile OFILE name of output image file (default: None) -p PFULL [PFULL ...], --pfull PFULL [PFULL ...] @@ -198,13 +208,16 @@ Plan View time window in hours (default: 3) -v VALIDTIME, --validtime VALIDTIME valid time (default: None) + --vmin VMIN color bar minimum (overrides robust=True) (default: None) + --vmax VMAX color bar maximum (overrides robust=True) (default: None) -Generate a plan view of all tendencies at 500 hPa: +Generate a plan view of all tendencies at 500 hPa for the 1-hour time window ending 20190615 20z: .. code-block:: bash - python planview_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp pbl -p 500 -t 1 -v 20190504T14 --nofineprint + python planview_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp pbl \ + -p 500 -t 1 -v 20190615T20 --nofineprint .. image:: figure/tmp_500hPa.png @@ -212,7 +225,8 @@ Generate a plan view of PBL tendency at default pressure levels: .. code-block:: bash - python planview_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp pbl -t 1 -v 20190504T13 --nofineprint + python planview_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp pbl \ + -t 1 -v 20190615T20 --nofineprint .. image:: figure/tmp_pbl.png @@ -227,6 +241,7 @@ Vertical Profile usage: vert_profile_fv3.py [-h] [-d] [--nofineprint] [-o OFILE] [--resid] [-s SHP] [--subtract SUBTRACT] [-t TWINDOW] [-v VALIDTIME] + [--xmin XMIN] [--xmax XMAX] config historyfile gridfile statevariable Vertical profile of FV3 diagnostic tendencies @@ -251,12 +266,16 @@ Vertical Profile time window in hours (default: 3) -v VALIDTIME, --validtime VALIDTIME valid time (default: None) - -Generate vertical profile of temperature tendencies averaged over the mid-CONUS region: + --xmin XMIN x-axis minimum (default: None) + --xmax XMAX x-axis maximum (default: None) + +Generate vertical profile of temperature tendencies averaged over the central US. Plot residual +tendency and its components. Limit the x-axis range with --xmin and --xmax. .. code-block:: bash - python vert_profile_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp -t 2 -v 20190504T14 -s $METPLOTPY_BASE/metplotpy/contributed/fv3_physics_tend/shapefiles/MID_CONUS --nofineprint + python vert_profile_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp \ + -t 1 -v 20190615T20 -s shapefiles/MID_CONUS --resid --xmin -0.0005 --xmax 0.0004 --nofineprint .. 
image:: figure/tmp.vert_profile.MID_CONUS.png @@ -267,14 +286,14 @@ Vertical Cross Section python cross_section_vert.py -h -Usage:: +:: - usage: cross_section_vert.py [-h] [-d] [--dindex DINDEX] [--ncols NCOLS] [--nofineprint] - [-o OFILE] [-s START START] [-e END END] - [--subtract SUBTRACT] [-t TWINDOW] [-v VALIDTIME] + usage: cross_section_vert.py [-h] [-d] [--ncols NCOLS] [--nofineprint] [--norobust] [-o OFILE] + [-s START START] [-e END END] [--subtract SUBTRACT] [-t TWINDOW] + [-v VALIDTIME] [--vmin VMIN] [--vmax VMAX] config historyfile gridfile statevariable - Vertical cross section of FV3 diagnostic tendency + Vertical cross section of FV3 diagnostic tendencies positional arguments: config yaml configuration file @@ -285,29 +304,33 @@ Usage:: optional arguments: -h, --help show this help message and exit -d, --debug - --dindex DINDEX tick and gridline interval along cross section (default: 20) --ncols NCOLS number of columns (default: None) - --nofineprint Don't add metadata and created by date (for comparing images) - (default: False) + --nofineprint Don't add metadata and created by date (for comparing images) (default: False) + --norobust compute colormap range with extremes, not 2nd and 98th percentiles (default: + False) -o OFILE, --ofile OFILE name of output image file (default: None) -s START START, --start START START - start point (default: (28, -115)) + start point lat lon (default: (28, -115)) -e END END, --end END END - end point (default: (30, -82)) + end point lat lon (default: (30, -82)) --subtract SUBTRACT FV3 history file to subtract (default: None) -t TWINDOW, --twindow TWINDOW time window in hours (default: 3) -v VALIDTIME, --validtime VALIDTIME valid time (default: None) - -Generate vertical cross section from 32°N 115°W to 34°N 82°W: + --vmin VMIN color bar minimum (overrides robust=True) (default: None) + --vmax VMAX color bar maximum (overrides robust=True) (default: None) + +Generate vertical cross section of u-wind tendencies from 28°N 120°W to 26°N 75°W over one-hour +time window ending 20z June 15, 2019. .. code-block:: bash - python cross_section_vert.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp -t 2 -v 20190504T14 -s 32 -115 -e 34 -82 --nofineprint + python cross_section_vert.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc ugrd \ + -t 1 -v "2019-06-15 20" -s 28 -120 -e 26 -75 --nofineprint -.. image:: figure/tmp_32.0N-115.0E-34.0N-82.0E.png +.. image:: figure/ugrd_28.0N-120.0E-26.0N-75.0E.png Difference Plot --------------- @@ -317,7 +340,8 @@ Put file you want to subtract after the --subtract argument: .. code-block:: bash - python vert_profile_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp -t 1 --subtract $WORKING_DIR/fv3_history.nc --nofineprint + python vert_profile_fv3.py $CONFIG $WORKING_DIR/fv3_history.nc $WORKING_DIR/grid_spec.nc tmp \ + -t 1 --subtract $WORKING_DIR/fv3_history.nc --resid --nofineprint .. image:: figure/tmp.vert_profile.png diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index 15692723..892c3f6f 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -10,6 +10,43 @@ describes the bugfix, enhancement, or new feature: METplotpy Release Notes ======================= + +METplotpy Version 3.0.0-beta5 release notes (20240630) +------------------------------------------------------ + + + .. dropdown:: New Plots + + + .. 
dropdown:: Enhancements + + + * **Enhance TCMPR plotter to read TCDiag lines and filter by one column and plot another column** (`#342 `_). + + * Create documentation and testing for TCMPR plotting code (`#383 `_). + + * Update fv3_physics_tend for new FV3 output format (`#380 `_). + + .. dropdown:: Internal + + + * Update GitHub issue and pull request templates to reflect the current development workflow details (`#388 `_). + + * Consider using only .yml or only .yaml extensions (`#417 `_). + + + * METplotpy: Code coverage statistics (`#55 `_). + + + .. dropdown:: Bugfixes + + + + + + + + METplotpy Version 3.0.0-beta4 release notes (20240417) ------------------------------------------------------ diff --git a/docs/Users_Guide/roc_diagram.rst b/docs/Users_Guide/roc_diagram.rst index 2156ec0b..0c1e9713 100644 --- a/docs/Users_Guide/roc_diagram.rst +++ b/docs/Users_Guide/roc_diagram.rst @@ -18,6 +18,15 @@ refer to the Example ======= +Data +---- + +The ROC diagram utilizes MET CTC or PCT linetype data. The MET .stat output files must +first be reformatted into a format recognized by the ROC diagram code. The METdataio METreformat +module provides reformatting support for these linetypes. Please refer to the +`METdataio User's Guide `_ +for instructions on reformatting the MET .stat output. + Sample Data ----------- @@ -111,9 +120,18 @@ custom config files and sample data reside). Replace the relative path *./plot_20200507_074426.data* with the full path *$WORKING_DIR/plot_20200507_074426.data* (including replacing *$WORKING_DIR* with the full path to the working directory). + Modify the *plot_filename* setting to point to the output path where the plot will be saved, including the name of the plot. +Modify the *roc_pct* and *roc_ctc* settings in the +*$WORKING_DIR/custom_roc_diagram.yaml* +file to explicitly indicate the linetype of the input data. If using +PCT input data, then set *roc_pct* to True and *roc_ctc* to False. The data used in this +example is CTC linetype data; therefore, the *roc_pct* setting is set to False and the *roc_ctc* +setting is set to True. + + For example: *stat_input: /username/myworkspace/METplotpy/test/roc_diagram/plot_20200507_074426.data* diff --git a/docs/Users_Guide/stratosphere_plots.rst b/docs/Users_Guide/stratosphere_plots.rst index afe21aa0..8fc88d6f 100644 --- a/docs/Users_Guide/stratosphere_plots.rst +++ b/docs/Users_Guide/stratosphere_plots.rst @@ -6,11 +6,11 @@ Description =========== The **stratosphere_plots.py** script contains the plotting portion for -two Stratosphere use cases, one which creates a ME plot in latitude and pressure -and another which creates ME and RMSE plots for lead time and pressure -Two METplus use cases, illustrate how to use this plot. One runs `zonal mean biases -`_ -and another creates bias and RMSE for `polar cap temperature and polar vortex U `_ +three Stratosphere use cases. One use case creates a ME plot in latitude and pressure, +another creates ME and RMSE plots for lead time and pressure, and a third +creates two phase diagrams and a time series of U at 50mb and 30mb. 
+The three METplus use cases illustrate how to use these plotting scripts for `zonal mean biases +`_, creating bias and RMSE for `polar cap temperature and polar vortex U `_, +and creating `phase diagrams and time series for QBO `_. These files are used by the image comparison test: @@ -32,6 +32,18 @@ These files are used by the image comparison test: * **RMSE_2018_02_polar_vortex_U.png**: Run "plot_polar_rmse" in **stratosphere_plots.py** to create this plot. +* **ERA_GFS_QBO_circuits.png**: Run "plot_qbo_phase_circuits" in **stratosphere_plots.py** + to create this plot. + +* **ERA5_QBO_PhaseSpace.png**: Run "plot_qbo_phase_space" in **stratosphere_plots.py** + to create this plot. + +* **ERA_GFS_timeseries_30mb_u_201710_201802.png**: Run "plot_u_timeseries" in **stratosphere_plots.py** + to create this plot. + +* **ERA_GFS_timeseries_50mb_u_201710_201802.png**: Run "plot_u_timeseries" in **stratosphere_plots.py** + to create this plot. + Required Packages ================= @@ -46,6 +58,8 @@ Required Packages * numpy +* xarray + * pandas * cmocean @@ -60,13 +74,13 @@ Import stratosphere_plots into the script: .. code-block:: ini - from stratosphere_plots import plot_zonal_bias,plot_polar_bias,plot_polar_rmse + from stratosphere_plots import plot_zonal_bias,plot_polar_bias,plot_polar_rmse,plot_qbo_phase_circuits,plot_qbo_phase_space,plot_u_timeseries For plot_zonal_bias ------------------- In the code, generate the following as numpy -arrays (except outfile, ptitle, and plevels). +arrays (except outfile, ptitle, and plevs). **lats:** A numpy array of the latitude values under consideration. @@ -82,14 +96,14 @@ file, a **.png** version will be written. **ptitle:** A string containing the title of the plot. -**plevels:** A list containing integers of the contour levels used in +**plevs:** A list containing integers of the contour levels used in plotting the obs climatology. For plot_polar_bias ------------------- In the code, generate the following as numpy arrays -(except wrnum, output_plotname, and plevels). +(except outfile, ptitle, and plevs). **leads:** A numpy array containing the forecast lead times. @@ -103,19 +117,19 @@ file, a **.png** version will be written. **ptitle:** A string containing the title of the plot. **plevs:** A list containing floats of the contour levels used in -plotting +plotting. For plot_polar_rmse ------------------- In the code, generate the following as numpy arrays -(except wrnum, output_plotname, and plevels). +(except outfile, ptitle, and plevs). **leads:** A numpy array containing the forecast lead times. **levels:** A numpy array of the pressure level values under consideration. -**pdata:** A numpy array containing the bias. +**pdata:** A numpy array containing the RMSE. **outfile:** The full path and filename of the output plot file, a **.png** version will be written. @@ -123,7 +137,63 @@ file, a **.png** version will be written. **ptitle:** A string containing the title of the plot. **plevs:** A list containing floats of the contour levels used in -plotting +plotting. + +For plot_qbo_phase_circuits +--------------------------- + +In the code, generate the following as xarray DataArrays +(except inits, periods, and outfile). + +**inits:** A list of datetimes that are the start dates for each plot. + +**periods:** An integer containing the number of days to plot from the inits. + +**rean_qbo_pcs:** An xarray dataarray containing the projected daily +zonal winds for the observations. 
+ +**rfcst_qbo_pcs:** An xarray dataarray containing the projected +daily zonal winds for the model. + +**outfile:** The full path and filename of the output plot +file, a **.png** version will be written. + +For plot_qbo_phase_space +------------------------ + +In the code, generate the following as xarray DataArrays +(except ptitle and outfile). + +**rean_qbo_pcs:** An xarray dataarray containing the projected +daily zonal winds. + +**eofs:** An xarray dataarray containing the EOFs. + +**ptitle:** A string containing the title of the plot. + +**outfile:** The full path and filename of the output plot +file, a **.png** version will be written. + +For plot_u_timeseries +--------------------- + +In the code, generate the following as numpy arrays +(except ptitle and outfile). + +**obs_dt:** A numpy array of datetimes for the observations. + +**obs_u:** A numpy array containing U wind values for +the observations. + +**fcst_dt:** A numpy array of datetimes for the forecasts. + +**fcst_u:** A numpy array containing U wind values for +the forecasts. + +**ptitle:** A string containing the title of the plot. + +**outfile:** The full path and filename of the output plot +file, a **.png** version will be written. Invoke the plotting functions: @@ -135,7 +205,13 @@ Invoke the plotting functions: plot_polar_rmse(leads,levels,pdata,outfile,ptitle,plevs) + plot_qbo_phase_circuits(inits,periods,rean_qbo_pcs,rfcst_qbo_pcs,outfile) + + plot_qbo_phase_space(rean_qbo_pcs,eofs,ptitle,outfile) + + plot_u_timeseries(obs_dt,obs_u,fcst_dt,fcst_u,ptitle,outfile) + The output will be **.png** version of all requested plots and will be located based on what was specified (path and name) in the -**output_plotname**. +**outfile**. diff --git a/docs/conf.py b/docs/conf.py index 2cf55a05..dbd6c749 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,12 +22,12 @@ copyright = '2024, NSF NCAR' author = 'UCAR/NSF NCAR, NOAA, CSU/CIRA, and CU/CIRES' author_list = 'Fisher, H., D. Adriaansen, C. Kalb, D. Fillmore, T. Jensen, L. Goodrich, M. Win-Gildenmeister and T. 
Burek' -version = 'v3.0.0-beta5-dev' +version = 'v3.0.0-beta6-dev' verinfo = version release = f'{version}' release_year = '2024' -release_date = f'{release_year}-04-17' +release_date = f'{release_year}-06-30' copyright = f'{release_year}, {author}' diff --git a/docs/version b/docs/version index 9863e245..16bcea84 100644 --- a/docs/version +++ b/docs/version @@ -1 +1 @@ -__version__ = "3.0.0-beta5-dev" +__version__ = "3.0.0-beta6-dev" diff --git a/internal/scripts/sonarqube/sonar-project.properties b/internal/scripts/sonarqube/sonar-project.properties index 894fec47..0777e588 100644 --- a/internal/scripts/sonarqube/sonar-project.properties +++ b/internal/scripts/sonarqube/sonar-project.properties @@ -5,6 +5,7 @@ sonar.projectVersion=SONAR_PROJECT_VERSION sonar.branch.name=SONAR_BRANCH_NAME sonar.sources=metplotpy,test sonar.coverage.exclusions=test/** +sonar.python.coverage.reportPaths=coverage.xml sonar.sourceEncoding=UTF-8 # SonarQube server diff --git a/metplotpy/contributed/fv3_physics_tend/cross_section_vert.py b/metplotpy/contributed/fv3_physics_tend/cross_section_vert.py index a5970d2b..d840233f 100644 --- a/metplotpy/contributed/fv3_physics_tend/cross_section_vert.py +++ b/metplotpy/contributed/fv3_physics_tend/cross_section_vert.py @@ -1,70 +1,89 @@ +""" vertical cross section of tendencies """ import argparse -import cartopy import datetime import logging +import os +import cartopy import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator from metpy.interpolate import cross_section from metpy.units import units import numpy as np -import os import pandas as pd -import pdb -from . import physics_tend -import sys import xarray import yaml - -""" -Plan view of tendencies of t, q, u, or v from physics parameterizations, dynamics (non-physics), their total, and residual. Total change is the actual change in state variable from first time to last time. Total change differs from cumulative change -attributed to physics and non-physics tendencies when residual is not zero. -""" +from . 
import physics_tend def parse_args(): + """ + parse command line arguments + """ # =============Arguments=================== - parser = argparse.ArgumentParser(description = "Vertical cross section of FV3 diagnostic tendency", formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Vertical cross section of FV3 diagnostic tendencies", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) # ==========Mandatory Arguments=================== - parser.add_argument("config", type=argparse.FileType('r'), help="yaml configuration file") - parser.add_argument("historyfile", type=argparse.FileType("r"), help="FV3 history file") - parser.add_argument("gridfile", type=argparse.FileType("r"), help="FV3 grid spec file") - parser.add_argument("statevariable", type=str, help="moisture, temperature, or wind component variable name") + parser.add_argument("config", help="yaml configuration file") + parser.add_argument("historyfile", help="FV3 history file") + parser.add_argument("gridfile", help="FV3 grid spec file") + parser.add_argument( + "statevariable", help="moisture, temperature, or wind component variable name") # ==========Optional Arguments=================== parser.add_argument("-d", "--debug", action='store_true') - parser.add_argument("--dindex", type=int, default=20, help="tick and gridline interval along cross section") - parser.add_argument("--ncols", type=int, default=None, help="number of columns") - parser.add_argument("--nofineprint", action='store_true', help="Don't add metadata and created by date (for comparing images)") - parser.add_argument("-o", "--ofile", type=str, help="name of output image file") - parser.add_argument("-s", "--start", nargs=2, type=float, default=(28, -115), help="start point") - parser.add_argument("-e", "--end", nargs=2, type=float, default=(30, -82), help="end point") - parser.add_argument("--subtract", type=argparse.FileType("r"), help="FV3 history file to subtract") - parser.add_argument("-t", "--twindow", type=int, default=3, help="time window in hours") - parser.add_argument("-v", "--validtime", type=lambda x:pd.to_datetime(x), help="valid time") + parser.add_argument("--ncols", type=int, default=None, + help="number of columns") + parser.add_argument("--nofineprint", action='store_true', + help="Don't add metadata and created by date (for comparing images)") + parser.add_argument("--norobust", action='store_true', + help="compute colormap range with extremes, not 2nd and 98th percentiles") + parser.add_argument("-o", "--ofile", help="name of output image file") + parser.add_argument("-s", "--start", nargs=2, type=float, + default=(28, -115), help="start point lat lon") + parser.add_argument("-e", "--end", nargs=2, type=float, + default=(30, -82), help="end point lat lon") + parser.add_argument("--subtract", help="FV3 history file to subtract") + parser.add_argument("-t", "--twindow", type=float, + default=3, help="time window in hours") + parser.add_argument("-v", "--validtime", help="valid time") + parser.add_argument("--vmin", type=float, + help="color bar minimum (overrides robust=True)") + parser.add_argument("--vmax", type=float, + help="color bar maximum (overrides robust=True)") args = parser.parse_args() return args def main(): + """ + Vertical cross section view of tendencies of t, q, u, or v from physics parameterizations, + dynamics (non-physics), the combination of all tendencies (physics and non-physics), + the actual tendency, and the residual. 
Residual is the sum of all tendencies minus the + actual tendency. + """ args = parse_args() - gfile = args.gridfile - ifile = args.historyfile - variable = args.statevariable - config = args.config - debug = args.debug - dindex = args.dindex - ncols = args.ncols - nofineprint= args.nofineprint - ofile = args.ofile - startpt = args.start - endpt = args.end - subtract = args.subtract - twindow = datetime.timedelta(hours = args.twindow) - validtime = args.validtime + gfile = args.gridfile + ifile = args.historyfile + variable = args.statevariable + config = args.config + ncols = args.ncols + nofineprint = args.nofineprint + ofile = args.ofile + startpt = args.start + endpt = args.end + robust = not args.norobust + subtract = args.subtract + twindow = datetime.timedelta(hours=args.twindow) + twindow_quantity = twindow.total_seconds() * units.seconds + validtime = pd.to_datetime(args.validtime) + vmin = args.vmin + vmax = args.vmax level = logging.INFO - if debug: level = logging.DEBUG - logging.basicConfig(format='%(asctime)s - %(message)s', level=level) # prepend log message with time + if args.debug: + level = logging.DEBUG + # prepend log message with time + logging.basicConfig(format='%(asctime)s - %(message)s', level=level) logging.debug(args) # Output filename. @@ -74,135 +93,173 @@ def main(): ofile = os.path.realpath(args.ofile) odir = os.path.dirname(ofile) if not os.path.exists(odir): - logging.info(f"output directory {odir} does not exist. Creating it") + logging.info( + f"output directory {odir} does not exist. Creating it") os.mkdir(odir) - logging.info(f"output filename={ofile}") - + logging.debug("output filename=%s", ofile) # Reload fv3 in case user specifies a custom --config file - fv3 = yaml.load(open(config.name), Loader=yaml.FullLoader) + fv3 = yaml.load(open(config, encoding="utf8"), Loader=yaml.FullLoader) # Read lat/lon from gfile logging.debug(f"read lat/lon from {gfile}") - gds = xarray.open_dataset(gfile.name) + gds = xarray.open_dataset(gfile) lont = gds[fv3["lon_name"]] latt = gds[fv3["lat_name"]] # Open input file - logging.debug(f"About to open {ifile}") - fv3ds = xarray.open_dataset(ifile.name) + logging.debug("open %s", ifile) + fv3ds = xarray.open_dataset(ifile) + + if subtract: + logging.info("subtracting %s", subtract) + with xarray.set_options(keep_attrs=True): + fv3ds -= xarray.open_dataset(subtract) + datetimeindex = fv3ds.indexes['time'] if hasattr(datetimeindex, "to_datetimeindex"): - # Convert from CFTime to pandas datetime. I get a warning CFTimeIndex from non-standard calendar 'julian'. Maybe history file should be saved with standard calendar. + # Convert from CFTime to pandas datetime or get warning + # CFTimeIndex from non-standard calendar 'julian'. + # Maybe history file should be saved with standard calendar. # To turn off warning, set unsafe=True. datetimeindex = datetimeindex.to_datetimeindex(unsafe=True) + ragged_times = datetimeindex != datetimeindex.round('1ms') + if any(ragged_times): + logging.info( + f"round times to nearest millisec. 
before: {datetimeindex[ragged_times].values}") + datetimeindex = datetimeindex.round('1ms') + logging.info(f"after: {datetimeindex[ragged_times].values}") fv3ds['time'] = datetimeindex - if subtract: - logging.debug(f"subtracting {subtract.name}") - with xarray.set_options(keep_attrs=True): - fv3ds -= xarray.open_dataset(subtract.name) - fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) # lont and latt used by pcolorfill() - tendency_vars = fv3["tendency_varnames"][variable] # list of tendency variable names for requested state variable - fv3ds = physics_tend.add_time0(fv3ds, variable, fv3) - tendencies = fv3ds[tendency_vars] # subset of original Dataset + # lont and latt used by pcolorfill() + fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) if validtime is None: - logging.debug("validtime not provided on command line, so use latest time in history file.") validtime = fv3ds.time.values[-1] validtime = pd.to_datetime(validtime) + logging.info( + "validtime not provided on command line. Using last time in history file %s.", + validtime) time0 = validtime - twindow - time1 = time0 + datetime.timedelta(hours=1) - logging.info(f"Sum tendencies {time1}-{validtime}") - tindex = dict(time=slice(time1, validtime)) # slice of time from hour after time0 through validtime - tendencies_avg = tendencies.sel(tindex).mean(dim="time") # average tendencies in time - - # Dynamics (nophys) tendency is not reset every hour. Just calculate change from time0 to validtime. - nophys_var = [x for x in tendency_vars if x.endswith("_nophys")] - assert len(nophys_var) == 1 - nophys_var = nophys_var[0] # we don't want a 1-element list; we want a string. So that tendencies[nophys_var] is a DataArray, not a Dataset. - logging.info(f"Subtract nophys tendency at {time0} from {validtime}") - nophys_delta = tendencies[nophys_var].sel(time=validtime) - tendencies[nophys_var].sel(time=time0) - tendencies_avg[nophys_var] = nophys_delta / args.twindow - - - # Restore units after .mean() removed them. Copy units from 1st tendency variable. - tendency_units = units.parse_expression(fv3ds[tendency_vars[0]].units) - logging.debug(f"restoring {tendency_units} units after .mean() method removed them.") - tendencies_avg *= tendency_units - for da in tendencies_avg: - tendencies_avg[da] = tendencies_avg[da].metpy.convert_units("K/hour") - long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] # Make list of long_names before .to_array() loses them. - - # Remove characters up to and including 1st underscore (e.g. du3dt_) in DataArray name. - # for example dt3dt_pbl -> pbl - name_dict = {da : "_".join(da.split("_")[1:]) for da in tendencies_avg.data_vars} + assert time0 in fv3ds.time, (f"time0 {time0} not in history file. Closest is " + f"{fv3ds.time.sel(time=time0, method='nearest').time.data}") + + # list of tendency variable names for requested state variable + tendency_vars = fv3["tendency_varnames"][variable] + tendencies = fv3ds[tendency_vars] # subset of original Dataset + # convert DataArrays to Quantities to protect units. DataArray.mean drops units attribute. + tendencies = tendencies.metpy.quantify() + + # Define time slice starting with time-after-time0 and ending with validtime. + # We use the time *after* time0 because the time range corresponding to the tendency + # output is the period immediately prior to the tendency timestamp. + # That way, slice(time_after_time0, validtime) has a time range of [time0,validtime]. 
+ idx_first_time_after_time0 = (fv3ds.time > time0).argmax() + time_after_time0 = fv3ds.time[idx_first_time_after_time0] + tindex = {"time": slice(time_after_time0, validtime)} + logging.debug( + "Time-weighted mean tendencies for time index slice %s", tindex) + timeweights = fv3ds.time.diff("time").sel(tindex) + time_weighted_tendencies = tendencies.sel(tindex) * timeweights + tendencies_avg = time_weighted_tendencies.sum( + dim="time") / timeweights.sum(dim="time") + + # Make list of long_names before .to_array() loses them. + long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] + + # Keep characters after final underscore. The first part is redundant. + # for example dtend_u_pbl -> pbl + name_dict = {da: "_".join(da.split("_")[-1:]) + for da in tendencies_avg.data_vars} + logging.debug("rename %s", name_dict) tendencies_avg = tendencies_avg.rename(name_dict) # Stack variables along new tendency dimension of new DataArray. tendency_dim = f"{variable} tendency" - tendencies_avg = tendencies_avg.to_array(dim=tendency_dim) - # Assign long_names to a new DataArray coordinate. It will have the same shape as tendency dimension. - tendencies_avg = tendencies_avg.assign_coords({"long_name":(tendency_dim,long_names)}) - - logging.info(f"calculate actual change in {variable}") - state_variable = fv3ds[variable].metpy.quantify() # Tried metpy.quantify() with open_dataset, but pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry - dstate_variable = state_variable.sel(time = validtime) - state_variable.sel(time = time0) - dstate_variable = dstate_variable.assign_coords(time=validtime) - dstate_variable.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" - - # Add all tendencies together and subtract actual rate of change in state variable. - # This is residual. - total = tendencies_avg.sum(dim=tendency_dim) - twindow_quantity = twindow.total_seconds() * units.seconds - resid = total - dstate_variable/twindow_quantity - - - logging.info("Define DataArray to plot (da2plot).") - # Plot all tendencies. - da2plot = tendencies_avg - # Add total and resid DataArrays to tendency_dim. - total = total.expand_dims({tendency_dim:["total"]}).assign_coords(long_name="sum of tendencies") - resid = resid.expand_dims({tendency_dim:["resid"]}).assign_coords(long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") - da2plot = xarray.concat([da2plot, total, resid], dim=tendency_dim) + tendencies_avg = tendencies_avg.to_array(dim=tendency_dim, name=tendency_dim) + # Assign long_names to a new DataArray coordinate. + # It will have the same shape as tendency dimension. + tendencies_avg = tendencies_avg.assign_coords( + {"long_name": (tendency_dim, long_names)}) + + logging.info("calculate actual change in %s", variable) + # Tried metpy.quantify() with open_dataset, but + # pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry + state_variable = fv3ds[variable].metpy.quantify() + actual_change = state_variable.sel(time=validtime) - state_variable.sel( + time=time0, method="nearest", tolerance=datetime.timedelta(milliseconds=1)) + actual_change = actual_change.assign_coords(time=validtime) + actual_change.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" + + # Sum all tendencies (physics and non-physics) + all_tendencies = tendencies_avg.sum(dim=tendency_dim) + + # Subtract physics tendency variable if it was in tendency_vars. Don't want to double-count. 
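# --- editor's sketch (illustrative only, not part of this patch) ---
# Why the subtraction below is needed: dtend_*_phys is itself the sum of the
# individual physics terms, so summing every variable along the tendency
# dimension would count the physics contribution twice.  Hypothetical numbers:
import xarray as xr

tend = xr.DataArray([1.0, 2.0, 3.0, 5.0],
                    coords={"tendency": ["pbl", "mp", "phys", "nophys"]},
                    dims="tendency")             # phys = pbl + mp
total = tend.sum("tendency")                     # 11: physics counted twice
total = total - tend.sel(tendency="phys").data   # 8 = pbl + mp + nophys
# .data avoids re-attaching the scalar "tendency" coordinate, as in the code below
# --- end sketch ---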
+ phys_var = [x for x in tendency_vars if x.endswith("_phys")] + if phys_var: + logging.info("subtracting 'phys' tendency variable " + "from all_tendencies to avoid double-counting") + # use .data to avoid re-introducing tendency coordinate + all_tendencies = all_tendencies - \ + tendencies_avg.sel({tendency_dim: "phys"}).data + + # Calculate actual tendency of state variable. + actual_tendency = actual_change / twindow_quantity + logging.info( + "subtract actual tendency from all_tendencies to get residual") + resid = all_tendencies - actual_tendency + + # Concatenate all_tendencies, actual_tendency, and resid DataArrays. + # Give them a name and long_name along tendency_dim. + all_tendencies = all_tendencies.expand_dims({tendency_dim: ["all"]}).assign_coords( + long_name="sum of tendencies") + actual_tendency = actual_tendency.expand_dims({tendency_dim: ["actual"]}).assign_coords( + long_name=f"actual rate of change of {variable}") + resid = resid.expand_dims({tendency_dim: ["resid"]}).assign_coords( + long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") + da2plot = xarray.concat( + [tendencies_avg, all_tendencies, actual_tendency, resid], dim=tendency_dim) col = tendency_dim if da2plot.metpy.vertical.attrs["units"] == "mb": - da2plot.metpy.vertical.attrs["units"] = "hPa" # For MetPy. Otherwise, mb is interpreted as millibarn. - + # For MetPy. Otherwise, mb is interpreted as millibarn. + da2plot.metpy.vertical.attrs["units"] = "hPa" # Make default dimensions of facetgrid kind of square. if not ncols: # Default # of cols is square root of # of panels ncols = int(np.ceil(np.sqrt(len(da2plot)))) - nrows = int(np.ceil(len(da2plot)/ncols)) - - # dequantify moves units from DataArray to Attributes. Now they show up in colorbar. - da2plot = da2plot.metpy.dequantify() - - if da2plot["pfull"].size == 1: - # Avoid ValueError: ('grid_yt', 'grid_xt') must be a permuted list of ('pfull', 'grid_yt', 'grid_xt'), unless `...` is included - # Error occurs in pcolormesh(). - da2plot=da2plot.squeeze() - # Kludgy steps to prepare metadata for metpy cross section - da2plot = da2plot.drop_vars(['grid_yt','grid_xt','long_name']).rename(dict(grid_yt="y",grid_xt="x")) # these confuse metpy + # grid_yt and grid_xt confuse metpy + da2plot = da2plot.drop_vars(['grid_yt', 'grid_xt', 'long_name']).rename( + {"grid_yt":"y", "grid_xt":"x"}) # fv3 uses Extended Schmidt Gnomomic grid for regional applications. This is not in cartopy. # Found similar Lambert Conformal projection by trial and error. - da2plot = da2plot.metpy.assign_crs( grid_mapping_name="lambert_conformal_conic", standard_parallel=fv3["standard_parallel"], longitude_of_central_meridian=-97.5, latitude_of_projection_origin=fv3["standard_parallel"]).metpy.assign_y_x(force=True, tolerance=44069*units.m) - # Define cross section. Use different variable than da2plot because da2plot is used later for inset. - # upgraded xarray to 0.21.1 to avoid FutureWarning: Passing method to Float64Index.get_loc is deprecated - cross = cross_section(da2plot, startpt, endpt) + da2plot = da2plot.metpy.assign_crs( + grid_mapping_name="lambert_conformal_conic", + standard_parallel=fv3["standard_parallel"], + longitude_of_central_meridian=-97.5, + latitude_of_projection_origin=fv3["standard_parallel"]).metpy.assign_y_x( + force=True, tolerance=55000*units.m) + + # dequantify moves units from DataArray to attributes. Now they show up in colorbar. 
+ # and avoid NotImplementedError: Don't yet support nd fancy indexing from cross_section() + da2plot = da2plot.metpy.dequantify() + logging.info("Define cross section.") + cross = cross_section(da2plot, startpt, endpt) logging.info("plot pcolormesh") - w,h = 0.18, 0.18 # normalized width and height of inset. Shrink colorbar to provide space. - pc = cross.plot.pcolormesh(x="index", y="pfull", yincrease=False, col=col, col_wrap=ncols, robust=True, infer_intervals=True, - cmap=fv3["cmap"], cbar_kwargs={'shrink':1-h, 'anchor':(0,0.25-h)}) # robust (bool, optional) – If True and vmin or vmax are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values - for ax in pc.axes.flat: + # normalized width and height of inset. Shrink colorbar to provide space. + wid_inset, hgt_inset = 0.18, 0.18 + pcm = cross.squeeze().plot.pcolormesh(x="lont", y="pfull", yincrease=False, + col=col, col_wrap=ncols, robust=robust, infer_intervals=True, + vmin=vmin, vmax=vmax, cmap=fv3["cmap"], + cbar_kwargs={'shrink': 1-hgt_inset, 'anchor': (0, 0.25-hgt_inset)}) + + for ax in pcm.axs.flat: ax.grid(visible=True, color="grey", alpha=0.5, lw=0.5) - ax.xaxis.set_major_locator(MultipleLocator(dindex)) ax.yaxis.set_major_locator(MultipleLocator(100)) ax.yaxis.set_minor_locator(MultipleLocator(25)) ax.grid(which="minor", alpha=0.3, lw=0.4) @@ -210,36 +267,39 @@ def main(): # Add time to title title = f'{time0}-{validtime} ({twindow_quantity.to("hours"):~} time window)' plt.suptitle(title, wrap=True) - # pad top and bottom for title and fineprint. Unfortunately, you must redefine right pad, as xarray no longer controls it. - plt.subplots_adjust(top=0.9,right=0.8,bottom=0.1) + # pad top and bottom for title and fineprint. + # Unfortunately, you must redefine right pad, as xarray no longer controls it. + plt.subplots_adjust(top=0.9, right=0.8, bottom=0.1) # Locate cross section on conus map background. Put in inset. data_crs = da2plot.metpy.cartopy_crs - ax_inset = plt.gcf().add_axes([.999-w,.999-h, w, h], projection=data_crs) + ax_inset = plt.gcf().add_axes( + [.995-wid_inset, .999-hgt_inset, wid_inset, hgt_inset], projection=data_crs) # Plot the endpoints of the cross section (make sure they match path) - endpoints = data_crs.transform_points(cartopy.crs.Geodetic(), *np.vstack([startpt, endpt]).transpose()[::-1]) - bb = ax_inset.scatter(endpoints[:, 0], endpoints[:, 1], s=1.3, c='k', zorder=2) - ax_inset.scatter(cross['x'][dindex::dindex], cross['y'][dindex::dindex], s=3.4, c='white', linewidths=0.2, edgecolors='k', zorder=bb.get_zorder()+1) + endpoints = data_crs.transform_points( + cartopy.crs.Geodetic(), *np.vstack([startpt, endpt]).transpose()[::-1]) + bb = ax_inset.scatter(endpoints[:, 0], endpoints[:, 1], c='k', zorder=2) + ax_inset.scatter(cross['x'], cross['y'], + s=3.4, c='white', linewidths=0.2, edgecolors='k', zorder=bb.get_zorder()+1) # Plot the path of the cross section ax_inset.plot(cross['x'], cross['y'], c='k', zorder=2) physics_tend.add_conus_features(ax_inset) extent = fv3["extent"] ax_inset.set_extent(extent) - # Annotate figure with details about figure creation. 
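# --- editor's sketch (illustrative only, not part of this patch) ---
# Layout idea used in the figure code above: shrink the colorbar by the inset
# height and anchor it lower so a small locator map fits in the upper-right
# corner.  Numbers mirror the 0.18 x 0.18 normalized inset in this script.
import matplotlib.pyplot as plt
import numpy as np

wid_inset, hgt_inset = 0.18, 0.18
fig, ax = plt.subplots()
mappable = ax.pcolormesh(np.random.rand(5, 5))
fig.colorbar(mappable, ax=ax, shrink=1 - hgt_inset, anchor=(0, 0.25 - hgt_inset))
ax_inset = fig.add_axes([0.995 - wid_inset, 0.999 - hgt_inset, wid_inset, hgt_inset])
ax_inset.set_xticks([])
ax_inset.set_yticks([])
# --- end sketch ---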
- fineprint = f"history: {os.path.realpath(ifile.name)}" - if subtract: - fineprint += f"\nsubtract: {os.path.realpath(subtract.name)}" - fineprint += f"\ngrid_spec: {os.path.realpath(gfile.name)}" - fineprint += f"\ncreated {datetime.datetime.now(tz=None)}" + # Annotate figure with args namespace and timestamp + fineprint = f"{args} " + fineprint += f"created {datetime.datetime.now(tz=None)}" if nofineprint: - logging.info(fineprint) + logging.debug(fineprint) else: - fineprint_obj = plt.annotate(text=fineprint, xy=(1,1), xycoords='figure pixels', fontsize=5) - + logging.debug("add fineprint to image") + plt.figtext(0, 0, fineprint, fontsize='xx-small', + va="bottom", wrap=True) plt.savefig(ofile, dpi=fv3["dpi"]) - logging.info(f'created {os.path.realpath(ofile)}') + logging.info('created %s', os.path.realpath(ofile)) + if __name__ == "__main__": main() diff --git a/metplotpy/contributed/fv3_physics_tend/pare_down_history.csh b/metplotpy/contributed/fv3_physics_tend/pare_down_history.csh index fac4770a..b7700a4b 100755 --- a/metplotpy/contributed/fv3_physics_tend/pare_down_history.csh +++ b/metplotpy/contributed/fv3_physics_tend/pare_down_history.csh @@ -2,8 +2,11 @@ module load nco set historyfile=/glade/scratch/mwong/dave/sample_cases/CONUS_25km_GFSv15p2/2019050412/fv3_history.nc +set historyfile=/glade/campaign/mmm/parc/ahijevyc/METplotpy/DavidA_fv3_history_etc_files/fv3_history.nc set gridspec=/glade/scratch/mwong/dave/sample_cases/CONUS_25km_GFSv15p2/2019050412/grid_spec.nc +set gridspec=/glade/campaign/mmm/parc/ahijevyc/METplotpy/DavidA_fv3_history_etc_files/grid_spec.nc -ncks -x -v phalf,refl_10cm,hgtsfc,pressfc,grle,snmr,rwmr,icmr,dzdt,delz,dpres,o3mr,clwmr,zsurf -d time,0,1 $historyfile `basename $historyfile` -ncks -v grid_xt,grid_yt,grid_lont,grid_latt,area $gridspec +# extract variables +ncks -v pfull,time,spfh,dtend_qv_pbl,dtend_qv_deepcnv,dtend_qv_shalcnv,dtend_qv_mp,dtend_qv_phys,dtend_qv_nophys,tmp,dtend_temp_lw,dtend_temp_sw,dtend_temp_pbl,dtend_temp_deepcnv,dtend_temp_shalcnv,dtend_temp_mp,dtend_temp_orogwd,dtend_temp_cnvgwd,dtend_temp_phys,dtend_temp_nophys,ugrd,dtend_u_pbl,dtend_u_orogwd,dtend_u_deepcnv,dtend_u_cnvgwd,dtend_u_shalcnv,dtend_u_phys,dtend_u_nophys,vgrd,dtend_v_pbl,dtend_v_orogwd,dtend_v_deepcnv,dtend_v_cnvgwd,dtend_v_shalcnv,dtend_v_phys,dtend_v_nophys -d time,0,2 $historyfile `basename $historyfile` +ncks -v grid_xt,grid_yt,grid_lont,grid_latt,area $gridspec $gridspec diff --git a/metplotpy/contributed/fv3_physics_tend/physics_tend.py b/metplotpy/contributed/fv3_physics_tend/physics_tend.py index e21444e0..dad0dab0 100644 --- a/metplotpy/contributed/fv3_physics_tend/physics_tend.py +++ b/metplotpy/contributed/fv3_physics_tend/physics_tend.py @@ -1,65 +1,25 @@ +"""Common functions for fv3_physics_tend""" +import logging +import os import cartopy.io.shapereader as shpreader import cartopy.feature as cfeature -import datetime -import logging import matplotlib.path import numpy as np -import os -import pandas as pd -from shapely.geometry import Point, multipolygon -#from tqdm import tqdm # progress bar +from shapely.geometry import multipolygon +# from tqdm import tqdm # progress bar import xarray -import yaml def add_conus_features(ax): - cl = ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.3) - bd = ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidth=0.3) - st = ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.1) - lk = ax.add_feature(cfeature.LAKES.with_scale('50m'), edgecolor='k', linewidth=0.25, facecolor='k', 
alpha=0.1) - return (cl, bd, st, lk) - - -def add_time0(ds, variable, fv3, interval=np.timedelta64(1,'h')): - tendency_varnames = fv3["tendency_varnames"] - time0_varname = fv3["time0_varname"] - # This takes a while so only keep requested state variable and - # its tendency_varnames. - tokeep = tendency_varnames[variable].copy() - tokeep.append(time0_varname[variable]) - tokeep.append(variable) - ds = ds[tokeep] - - # Return new Dataset with time0 (initialization time) - # Assume initialization time is an interval before first time - # in ds. Interval is 1 hour by default, but can be changed. - time0 = ds.time.values[0] - interval - times = np.insert(ds.time.values,0,time0) # new time array - # Allocate new Dataset with additional time at time0 - coords = dict(ds.coords) # extract ds.coords as dictionary so it can be changed. - coords.update(dict(time=times)) - data_vars = {} - dims = ds[variable].dims - # state variable at time0 and other times - vari = ds[time0_varname[variable]] - data = np.insert(ds[variable].data, 0, vari.data, axis=0) - data_vars[variable] = (dims, data) - # tendency=0 at time0 - logging.info(f"Adding time0 to {variable}. This could take a while...") - #for tendency in tqdm(tendency_varnames[variable]): - for tendency in tendency_varnames[variable]: - logging.debug(tendency) - dims = ds[tendency].dims - data = ds[tendency].data - data = np.insert(data, 0, np.zeros_like(data[0]), axis=0) - data_vars[tendency] = (dims, data) - ds0 = xarray.Dataset(data_vars=data_vars, coords=coords) - ds0.attrs = ds.attrs - for da in ds0: - ds0[da].attrs = ds[da].attrs - return ds0 + """ add borders """ + ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.3) + ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidth=0.3) + ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.1) + ax.add_feature(cfeature.LAKES.with_scale( + '50m'), edgecolor='k', linewidth=0.25, facecolor='k', alpha=0.1) + return ax -def pts_in_shp(lats, lons, shp, debug=False): +def pts_in_shp(lats, lons, shp): # Map longitude to -180 to +180 range lons = np.where(lons > 180, lons-360, lons) # If shp is a directory, point to .shp file of same name in it. @@ -67,7 +27,8 @@ def pts_in_shp(lats, lons, shp, debug=False): if os.path.isdir(shp): shp = shp + "/" + os.path.basename(shp) + ".shp" shape = shpreader.Reader(shp) - ll_array = np.hstack((lons.flatten()[:,np.newaxis],lats.flatten()[:,np.newaxis])) + ll_array = np.hstack( + (lons.flatten()[:, np.newaxis], lats.flatten()[:, np.newaxis])) mask = np.full(lats.flatten().shape, False) # How to make shapefile for EAST_CONUS (CONUS east of 105W) # import shapefile @@ -79,20 +40,21 @@ def pts_in_shp(lats, lons, shp, debug=False): # shape.to_file("EAST_CONUS") # It is as simple as that. - # This seems kind of hacky. Can you recurse through a mixture of Polygons and Multipolygons more elegantly? + # This seems kind of hacky. + # Can you recurse through a mixture of Polygons and Multipolygons more elegantly? # Tried geopandas read_shape . geometry but it was no more elegant. for g in shape.geometries(): logging.debug(f"{__name__} pts_in_shp area {g.area}") # How to deal with 3-D polygons (i.e. POLYGON Z)? some shape files are 3D. - if g.has_z: - logging.error(f"Uh oh. shape geometry has z-coordinate in {shp}") - logging.error("I don't know how to process 3-D polygons (i.e. POLYGON Z).") - sys.exit(1) + assert not g.has_z, (f"Uh oh. shape geometry has z-coordinate in {shp}" + "I don't know how to process 3-D polygons (i.e. 
POLYGON Z).") if isinstance(g, multipolygon.MultiPolygon): for mp in g.geoms: - mask = mask | matplotlib.path.Path(mp.exterior.coords).contains_points(ll_array) + mask = mask | matplotlib.path.Path( + mp.exterior.coords).contains_points(ll_array) else: - mask = mask | matplotlib.path.Path(g.exterior.coords).contains_points(ll_array) - logging.debug(f"pts_in_shp: {mask.sum()} points") + mask = mask | matplotlib.path.Path( + g.exterior.coords).contains_points(ll_array) + logging.debug("pts_in_shp: %s points", mask.sum()) shape.close() return np.reshape(mask, lats.shape) diff --git a/metplotpy/contributed/fv3_physics_tend/planview_fv3.py b/metplotpy/contributed/fv3_physics_tend/planview_fv3.py index 22f31628..3fb8edb5 100644 --- a/metplotpy/contributed/fv3_physics_tend/planview_fv3.py +++ b/metplotpy/contributed/fv3_physics_tend/planview_fv3.py @@ -1,70 +1,96 @@ +""" Plan view of tendencies """ import argparse -import cartopy import datetime import logging +import os +import cartopy import matplotlib.pyplot as plt from metpy.units import units import numpy as np -import os import pandas as pd -import pdb -from . import physics_tend -import sys import xarray import yaml - -""" -Plan view of tendencies of t, q, u, or v from physics parameterizations, dynamics (non-physics), their total, and residual. -Total change is the actual change in state variable from first time to last time. Total change differs from cumulative change -attributed to physics and non-physics tendencies when residual is not zero. -""" +from . import physics_tend def parse_args(): + """ + parse command line arguments + """ # =============Arguments=================== - parser = argparse.ArgumentParser(description = "Plan view of FV3 diagnostic tendency", formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Plan view of FV3 diagnostic tendencies", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) # ==========Mandatory Arguments=================== - parser.add_argument("config", type=argparse.FileType('r'), help="yaml configuration file") - parser.add_argument("historyfile", type=argparse.FileType("r"), help="FV3 history file") - parser.add_argument("gridfile", type=argparse.FileType("r"), help="FV3 grid spec file") - parser.add_argument("statevariable", type=str, help="moisture, temperature, or wind component variable name") - parser.add_argument("fill", type=str, help='type of tendency. ignored if pfull is a single level') + parser.add_argument("config", help="yaml configuration file") + parser.add_argument("historyfile", help="FV3 history file") + parser.add_argument("gridfile", help="FV3 grid spec file") + parser.add_argument( + "statevariable", help="moisture, temperature, or wind component variable name") + parser.add_argument( + "tendencytype", help='type of tendency. 
ignored if pfull is a single level') # ==========Optional Arguments=================== parser.add_argument("-d", "--debug", action='store_true') - parser.add_argument("--method", type=str, choices=["nearest", "linear","loglinear"], default="nearest", help="vertical interpolation method") - parser.add_argument("--ncols", type=int, default=None, help="number of columns") - parser.add_argument("--nofineprint", action='store_true', help="Don't add metadata and created by date (for comparing images)") - parser.add_argument("-o", "--ofile", type=str, help="name of output image file") - parser.add_argument("-p", "--pfull", nargs='+', type=float, default=[1000,925,850,700,500,300,200,100,0], help="pressure level(s) in hPa to plot. If only one pressure level is provided, the type-of-tendency argument will be ignored and all tendencies will be plotted.") - parser.add_argument("-s", "--shp", type=str, default=None, help="shape file directory for mask") - parser.add_argument("--subtract", type=argparse.FileType("r"), help="FV3 history file to subtract") - parser.add_argument("-t", "--twindow", type=int, default=3, help="time window in hours") - parser.add_argument("-v", "--validtime", type=lambda x:pd.to_datetime(x), help="valid time") + parser.add_argument("--method", choices=["nearest", "linear", "loglinear"], default="nearest", + help="vertical interpolation method") + parser.add_argument("--ncols", type=int, default=None, + help="number of columns") + parser.add_argument("--nofineprint", action='store_true', + help="Don't add metadata and created by date (for comparing images)") + parser.add_argument("--norobust", action='store_true', + help="compute colormap range with extremes, not 2nd and 98th percentiles") + parser.add_argument("-o", "--ofile", help="name of output image file") + parser.add_argument("-p", "--pfull", nargs='+', type=float, + default=[1000, 925, 850, 700, 500, 300, 200, 100, 0], + help=("pressure level(s) in hPa to plot. " + "If only one pressure level is provided, the type-of-tendency " + "argument will be ignored and all tendencies will be plotted.") + ) + parser.add_argument("-s", "--shp", help="shape file directory for mask") + parser.add_argument("--subtract", help="FV3 history file to subtract") + parser.add_argument("-t", "--twindow", type=float, + default=3, help="time window in hours") + parser.add_argument("-v", "--validtime", help="valid time") + parser.add_argument("--vmin", type=float, + help="color bar minimum (overrides robust=True)") + parser.add_argument("--vmax", type=float, + help="color bar maximum (overrides robust=True)") args = parser.parse_args() return args def main(): + """ + Plan view of tendencies of t, q, u, or v from physics parameterizations, + dynamics (non-physics), the combination of all tendencies (physics and non-physics), + the actual tendency, and the residual. Residual is the sum of all tendencies minus the + actual tendency. 
+ """ args = parse_args() - gfile = args.gridfile - ifile = args.historyfile - variable = args.statevariable - fill = args.fill - config = args.config - debug = args.debug - method = args.method - ncols = args.ncols - nofineprint= args.nofineprint - ofile = args.ofile - pfull = args.pfull * units.hPa - shp = args.shp - subtract = args.subtract - twindow = datetime.timedelta(hours = args.twindow) - validtime = args.validtime + gfile = args.gridfile + ifile = args.historyfile + variable = args.statevariable + tendencytype = args.tendencytype + config = args.config + method = args.method + ncols = args.ncols + nofineprint = args.nofineprint + ofile = args.ofile + pfull = args.pfull * units.hPa + robust = not args.norobust + shp = args.shp + subtract = args.subtract + twindow = datetime.timedelta(hours=args.twindow) + twindow_quantity = twindow.total_seconds() * units.seconds + validtime = pd.to_datetime(args.validtime) + vmin = args.vmin + vmax = args.vmax level = logging.INFO - if debug: level = logging.DEBUG - logging.basicConfig(format='%(asctime)s - %(message)s', level=level) # prepend log message with time + if args.debug: + level = logging.DEBUG + # prepend log message with time + logging.basicConfig(format='%(asctime)s - %(message)s', level=level) logging.debug(args) # Output filename. @@ -74,199 +100,233 @@ def main(): ofile = os.path.realpath(args.ofile) odir = os.path.dirname(ofile) if not os.path.exists(odir): - logging.info(f"output directory {odir} does not exist. Creating it") + logging.info( + f"output directory {odir} does not exist. Creating it") os.mkdir(odir) - logging.info(f"output filename={ofile}") - + logging.debug("output filename=%s", ofile) # Reload fv3 in case user specifies a custom --config file - fv3 = yaml.load(open(config.name), Loader=yaml.FullLoader) + fv3 = yaml.load(open(config, encoding="utf8"), Loader=yaml.FullLoader) # Read lat/lon/area from gfile logging.debug(f"read lat/lon/area from {gfile}") - gds = xarray.open_dataset(gfile.name) + gds = xarray.open_dataset(gfile) lont = gds[fv3["lon_name"]] latt = gds[fv3["lat_name"]] area = gds["area"] # Open input file - logging.debug(f"About to open {ifile}") - fv3ds = xarray.open_dataset(ifile.name) + logging.debug("open %s", ifile) + fv3ds = xarray.open_dataset(ifile) + + if subtract: + logging.info("subtracting %s", subtract) + with xarray.set_options(keep_attrs=True): + fv3ds -= xarray.open_dataset(subtract) + datetimeindex = fv3ds.indexes['time'] if hasattr(datetimeindex, "to_datetimeindex"): - # Convert from CFTime to pandas datetime. I get a warning CFTimeIndex from non-standard calendar 'julian'. Maybe history file should be saved with standard calendar. + # Convert from CFTime to pandas datetime or get warning + # CFTimeIndex from non-standard calendar 'julian'. + # Maybe history file should be saved with standard calendar. # To turn off warning, set unsafe=True. datetimeindex = datetimeindex.to_datetimeindex(unsafe=True) + ragged_times = datetimeindex != datetimeindex.round('1ms') + if any(ragged_times): + logging.info( + f"round times to nearest millisec. 
before: {datetimeindex[ragged_times].values}") + datetimeindex = datetimeindex.round('1ms') + logging.info(f"after: {datetimeindex[ragged_times].values}") fv3ds['time'] = datetimeindex - if subtract: - logging.debug(f"subtracting {subtract.name}") - with xarray.set_options(keep_attrs=True): - fv3ds -= xarray.open_dataset(subtract.name) - fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) # lont and latt used by pcolorfill() - tendency_vars = fv3["tendency_varnames"][variable] # list of tendency variable names for requested state variable - fv3ds = physics_tend.add_time0(fv3ds, variable, fv3) - tendencies = fv3ds[tendency_vars] # subset of original Dataset + # lont and latt used by pcolorfill() + fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) if validtime is None: - logging.debug("validtime not provided on command line, so use latest time in history file.") validtime = fv3ds.time.values[-1] validtime = pd.to_datetime(validtime) + logging.info( + "validtime not provided on command line. Using last time in history file %s.", + validtime) time0 = validtime - twindow - time1 = time0 + datetime.timedelta(hours=1) - logging.info(f"Sum tendencies {time1}-{validtime}") - tindex = dict(time=slice(time1, validtime)) # slice of time from hour after time0 through validtime - tendencies_avg = tendencies.sel(tindex).mean(dim="time") # average tendencies in time - - # Dynamics (nophys) tendency is not reset every hour. Just calculate change from time0 to validtime. - nophys_var = [x for x in tendency_vars if x.endswith("_nophys")] - assert len(nophys_var) == 1 - nophys_var = nophys_var[0] # we don't want a 1-element list; we want a string. So that tendencies[nophys_var] is a DataArray, not a Dataset. - logging.info(f"Subtract nophys tendency at {time0} from {validtime}") - nophys_delta = tendencies[nophys_var].sel(time=validtime) - tendencies[nophys_var].sel(time=time0) - tendencies_avg[nophys_var] = nophys_delta / args.twindow - - - # Restore units after .mean() removed them. Copy units from 1st tendency variable. - tendency_units = units.parse_expression(fv3ds[tendency_vars[0]].units) - logging.debug(f"restoring {tendency_units} units after .mean() method removed them.") - tendencies_avg *= tendency_units - for da in tendencies_avg: - tendencies_avg[da] = tendencies_avg[da].metpy.convert_units("K/hour") - long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] # Make list of long_names before .to_array() loses them. - - # Remove characters up to and including 1st underscore (e.g. du3dt_) in DataArray name. - # for example dt3dt_pbl -> pbl - name_dict = {da : "_".join(da.split("_")[1:]) for da in tendencies_avg.data_vars} + assert time0 in fv3ds.time, (f"time0 {time0} not in history file. Closest is " + f"{fv3ds.time.sel(time=time0, method='nearest').time.data}") + + # list of tendency variable names for requested state variable + tendency_vars = fv3["tendency_varnames"][variable] + tendencies = fv3ds[tendency_vars] # subset of original Dataset + # convert DataArrays to Quantities to protect units. DataArray.mean drops units attribute. + tendencies = tendencies.metpy.quantify() + + # Define time slice starting with time-after-time0 and ending with validtime. + # We use the time *after* time0 because the time range corresponding to the tendency + # output is the period immediately prior to the tendency timestamp. + # That way, slice(time_after_time0, validtime) has a time range of [time0,validtime]. 
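# --- editor's sketch (illustrative only, not part of this patch) ---
# Background for the time handling above: history files written with the
# non-standard 'julian' calendar yield a CFTimeIndex; converting it to a pandas
# DatetimeIndex and rounding to the nearest millisecond lets exact selections
# such as sel(time=validtime) match cleanly.  Hypothetical two-time index:
import cftime
import xarray as xr

idx = xr.CFTimeIndex([cftime.DatetimeJulian(2019, 5, 4, 12, 0, 0, 250),
                      cftime.DatetimeJulian(2019, 5, 4, 13, 0, 0, 0)])
dtidx = idx.to_datetimeindex(unsafe=True)   # pandas DatetimeIndex (suppress calendar warning)
dtidx = dtidx.round("1ms")                  # drop sub-millisecond noise
# --- end sketch ---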
+ idx_first_time_after_time0 = (fv3ds.time > time0).argmax() + time_after_time0 = fv3ds.time[idx_first_time_after_time0] + tindex = {"time": slice(time_after_time0, validtime)} + logging.debug( + "Time-weighted mean tendencies for time index slice %s", tindex) + timeweights = fv3ds.time.diff("time").sel(tindex) + time_weighted_tendencies = tendencies.sel(tindex) * timeweights + tendencies_avg = time_weighted_tendencies.sum( + dim="time") / timeweights.sum(dim="time") + + # Make list of long_names before .to_array() loses them. + long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] + + # Keep characters after final underscore. The first part is redundant. + # for example dtend_u_pbl -> pbl + name_dict = {da: "_".join(da.split("_")[-1:]) + for da in tendencies_avg.data_vars} + logging.debug("rename %s", name_dict) tendencies_avg = tendencies_avg.rename(name_dict) # Stack variables along new tendency dimension of new DataArray. tendency_dim = f"{variable} tendency" - tendencies_avg = tendencies_avg.to_array(dim=tendency_dim) - # Assign long_names to a new DataArray coordinate. It will have the same shape as tendency dimension. - tendencies_avg = tendencies_avg.assign_coords({"long_name":(tendency_dim,long_names)}) - - logging.info(f"calculate actual change in {variable}") - state_variable = fv3ds[variable].metpy.quantify() # Tried metpy.quantify() with open_dataset, but pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry - dstate_variable = state_variable.sel(time = validtime) - state_variable.sel(time = time0) - dstate_variable = dstate_variable.assign_coords(time=validtime) - dstate_variable.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" - - # Add all tendencies together and subtract actual rate of change in state variable. - # This is residual. - total = tendencies_avg.sum(dim=tendency_dim) - twindow_quantity = twindow.total_seconds() * units.seconds - resid = total - dstate_variable/twindow_quantity - - - logging.info("Define DataArray to plot (da2plot).") - if len(pfull) == 1: - # If only 1 pressure level was requested, plot all tendencies. - da2plot = tendencies_avg - # Add total and resid DataArrays to tendency_dim. - total = total.expand_dims({tendency_dim:["total"]}).assign_coords(long_name="sum of tendencies") - resid = resid.expand_dims({tendency_dim:["resid"]}).assign_coords(long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") - da2plot = xarray.concat([da2plot, total, resid], dim=tendency_dim) - col = tendency_dim - else: - # otherwise pick a DataArray (resid, state_variable, dstate_variable, tendency) + tendencies_avg = tendencies_avg.to_array(dim=tendency_dim, name=tendency_dim) + # Assign long_names to a new DataArray coordinate. + # It will have the same shape as tendency dimension. 
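# --- editor's sketch (illustrative only, not part of this patch) ---
# Pattern used in the surrounding lines: stack a Dataset of per-process
# tendencies into one DataArray along a new "tendency" dimension, then carry
# the human-readable long_names along as a coordinate, since to_array() drops
# the per-variable attributes.  Hypothetical variables and names:
import xarray as xr

ds = xr.Dataset({"pbl": ("x", [1.0, 2.0]), "mp": ("x", [3.0, 4.0])})
long_names = ["PBL tendency", "microphysics tendency"]

stacked = ds.to_array(dim="tendency", name="tendency")
stacked = stacked.assign_coords(long_name=("tendency", long_names))
# stacked.sel(tendency="mp").long_name.item() -> "microphysics tendency"
# --- end sketch ---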
+ tendencies_avg = tendencies_avg.assign_coords( + {"long_name": (tendency_dim, long_names)}) + + logging.info("calculate actual change in %s", variable) + # Tried metpy.quantify() with open_dataset, but + # pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry + state_variable = fv3ds[variable].metpy.quantify() + actual_change = state_variable.sel(time=validtime) - state_variable.sel( + time=time0, method="nearest", tolerance=datetime.timedelta(milliseconds=1)) + actual_change = actual_change.assign_coords(time=validtime) + actual_change.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" + + # Sum all tendencies (physics and non-physics) + all_tendencies = tendencies_avg.sum(dim=tendency_dim) + + # Subtract physics tendency variable if it was in tendency_vars. Don't want to double-count. + phys_var = [x for x in tendency_vars if x.endswith("_phys")] + if phys_var: + logging.info("subtracting 'phys' tendency variable " + "from all_tendencies to avoid double-counting") + # use .data to avoid re-introducing tendency coordinate + all_tendencies = all_tendencies - \ + tendencies_avg.sel({tendency_dim: "phys"}).data + + # Calculate actual tendency of state variable. + actual_tendency = actual_change / twindow_quantity + logging.info( + "subtract actual tendency from all_tendencies to get residual") + resid = all_tendencies - actual_tendency + + # Concatenate all_tendencies, actual_tendency, and resid DataArrays. + # Give them a name and long_name along tendency_dim. + all_tendencies = all_tendencies.expand_dims({tendency_dim: ["all"]}).assign_coords( + long_name="sum of tendencies") + actual_tendency = actual_tendency.expand_dims({tendency_dim: ["actual"]}).assign_coords( + long_name=f"actual rate of change of {variable}") + resid = resid.expand_dims({tendency_dim: ["resid"]}).assign_coords( + long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") + da2plot = xarray.concat( + [tendencies_avg, all_tendencies, actual_tendency, resid], dim=tendency_dim) + col = tendency_dim + + if len(pfull) > 1: + # If more than one pressure level was specified col = "pfull" - if fill == 'resid': # residual - da2plot = resid - elif fill == "": # plain-old state variable - da2plot = state_variable.sel(time=validtime) - elif fill == "d"+variable: # actual change in state variable - da2plot = dstate_variable - else: # expected change in state variable from tendencies - da2plot = tendencies_avg.sel({tendency_dim:fill}) + # just select one type of tendency + da2plot = da2plot.sel({tendency_dim: tendencytype}) if da2plot.metpy.vertical.attrs["units"] == "mb": - da2plot.metpy.vertical.attrs["units"] = "hPa" # For MetPy. Otherwise, mb is interpreted as millibarn. + # For MetPy. Otherwise, mb is interpreted as millibarn. + da2plot.metpy.vertical.attrs["units"] = "hPa" - # dequantify moves units from DataArray to Attributes. Now they show up in colorbar. And they aren't lost in xarray.DataArray.interp. + # dequantify moves units from DataArray to attributes. Now they show up in colorbar. + # And they aren't lost in xarray.DataArray.interp. da2plot = da2plot.metpy.dequantify() - # Select vertical levels. 
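# --- editor's sketch (illustrative only, not part of this patch) ---
# The three vertical selection methods offered below: "nearest" picks the
# closest model level, "linear" interpolates in pressure, and "loglinear"
# interpolates in log10(pressure), which better matches the roughly exponential
# spacing of model levels.  Hypothetical 1-D profile (the script itself uses
# the MetPy accessor with a tolerance for the "nearest" case):
import numpy as np
import xarray as xr

pfull = np.array([1000.0, 850.0, 500.0, 200.0])                 # hPa
profile = xr.DataArray([300.0, 290.0, 260.0, 220.0],
                       coords={"pfull": pfull}, dims="pfull")

nearest = profile.sel(pfull=700.0, method="nearest")            # value at 850 hPa
linear = profile.interp(pfull=700.0)                            # straight in p
loglinear = (profile.assign_coords(pfull=np.log10(profile.pfull))
             .interp(pfull=np.log10(700.0)))                    # in log10(p)
# --- end sketch ---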
+ logging.info(f"Select vertical levels with '{method}' method") if method == "nearest": - da2plot = da2plot.metpy.sel(vertical=pfull, method=method, tolerance=10.*units.hPa) + da2plot = da2plot.metpy.sel( + vertical=pfull, method=method, tolerance=10.*units.hPa) elif method == "linear": - da2plot = da2plot.interp(coords={"pfull":pfull}, method=method) - elif method == "loglinear": # interpolate in log10(pressure) - da2plot["pfull"] = np.log10(da2plot.pfull) - da2plot = da2plot.interp(coords={"pfull":np.log10(pfull.m)}, method="linear") + da2plot = da2plot.interp(coords={"pfull": pfull}, method=method) + elif method == "loglinear": # interpolate in log10(pressure) + da2plot["pfull"] = np.log10(da2plot.pfull) + da2plot = da2plot.interp( + coords={"pfull": np.log10(pfull.m)}, method="linear") da2plot["pfull"] = 10**da2plot.pfull - # Mask points outside shape. - if shp: - # mask points outside shape - mask = physics_tend.pts_in_shp(latt.values, lont.values, shp, debug=debug) # Use .values to avoid AttributeError: 'DataArray' object has no attribute 'flatten' - mask = xarray.DataArray(mask, coords=[da2plot.grid_yt, da2plot.grid_xt]) - da2plot = da2plot.where(mask, drop=True) - area = area.where(mask).fillna(0) + if shp is not None: + # Use .values to avoid AttributeError: 'DataArray' object has no attribute 'flatten' + mask = physics_tend.pts_in_shp( + latt.values, lont.values, shp) + mask = xarray.DataArray( + mask, coords=[da2plot.grid_yt, da2plot.grid_xt]) + da2plot = da2plot.where(mask) - totalarea = area.metpy.convert_units("km**2").sum() # Make default dimensions of facetgrid kind of square. if not ncols: # Default # of cols is square root of # of panels ncols = int(np.ceil(np.sqrt(len(da2plot)))) - nrows = int(np.ceil(len(da2plot)/ncols)) - - if da2plot["pfull"].size == 1: - # Avoid ValueError: ('grid_yt', 'grid_xt') must be a permuted list of ('pfull', 'grid_yt', 'grid_xt'), unless `...` is included - # Error occurs in pcolormesh(). - da2plot=da2plot.squeeze() - - - # central lon/lat from https://github.com/NOAA-EMC/regional_workflow/blob/release/public-v1/ush/Python/plot_allvars.py - subplot_kws = dict(projection=cartopy.crs.LambertConformal(central_longitude=-97.6, central_latitude=35.4)) - - - logging.info("plot pcolormesh") - pc = da2plot.plot.pcolormesh(x="lont", y="latt", col=col, col_wrap=ncols, robust=True, infer_intervals=True, - transform=cartopy.crs.PlateCarree(), - cmap=fv3["cmap"], cbar_kwargs={'shrink':0.8}, subplot_kws=subplot_kws) # robust (bool, optional) – If True and vmin or vmax are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values - for ax in pc.axes.flat: - ax.set_extent(fv3["extent"]) # Why needed only when col=tendency_dim? With col="pfull" it shrinks to unmasked size. + # Avoid ValueError in pcolormesh(). + da2plot = da2plot.squeeze() + + # central lon/lat from https://github.com/NOAA-EMC/regional_workflow/blob/ + # release/public-v1/ush/Python/plot_allvars.py + # switching from central_latitude=35.4 to central_latitude=fv3["standard_parallel"] + # (38.139 as of Aug 22, 2023) did not change plot appearance. 
+ subplot_kws = { + "projection": cartopy.crs.LambertConformal( + central_longitude=-97.6, central_latitude=35.4)} + + logging.debug("plot pcolormesh") + if robust: + logging.warning("compute colormap range with 2nd and 98th percentiles") + pcm = da2plot.plot.pcolormesh(x="lont", y="latt", col=col, col_wrap=ncols, robust=robust, + infer_intervals=True, transform=cartopy.crs.PlateCarree(), + vmin=vmin, vmax=vmax, cmap=fv3["cmap"], + cbar_kwargs={'shrink': 0.8}, subplot_kws=subplot_kws) + for ax in pcm.axs.flat: + # Why needed only when col=tendency_dim? With col="pfull" it shrinks to unmasked size. + ax.set_extent(fv3["extent"]) physics_tend.add_conus_features(ax) - # Add time to title + # Add time to title title = f'{time0}-{validtime} ({twindow_quantity.to("hours"):~} time window)' if col == tendency_dim: - title = f'pfull={pfull[0]:~.0f} {title}' + title = f'pfull={da2plot.pfull.metpy.quantify().data:~.1f} {title}' elif 'long_name' in da2plot.coords: title = f'{da2plot.coords["long_name"].data} {title}' plt.suptitle(title, wrap=True) - # Annotate figure with details about figure creation. - fineprint = f"history: {os.path.realpath(ifile.name)}" - if subtract: - fineprint += f"\nsubtract: {os.path.realpath(subtract.name)}" - fineprint += f"\ngrid_spec: {os.path.realpath(gfile.name)}" - if shp: fineprint += f"\nmask: {shp}" - fineprint += f"\ntotal area: {totalarea.data:~.0f}" - fineprint += f"\nvertical interpolation method: {method} requested levels: {pfull}" - fineprint += f"\ncreated {datetime.datetime.now(tz=None)}" + # Annotate figure with args namespace and timestamp + fineprint = f"{args} " + fineprint += f"created {datetime.datetime.now(tz=None)}" if nofineprint: - logging.info(fineprint) + logging.debug(fineprint) else: - fineprint_obj = plt.annotate(text=fineprint, xy=(1,1), xycoords='figure pixels', fontsize=5) - + logging.debug("add fineprint to image") + plt.figtext(0, 0, fineprint, fontsize='xx-small', + va="bottom", wrap=True) plt.savefig(ofile, dpi=fv3["dpi"]) - logging.info(f'created {os.path.realpath(ofile)}') + logging.info('created %s', os.path.realpath(ofile)) + def default_ofile(args): + """ + Return default output filename. + """ pfull = args.pfull * units.hPa if len(pfull) == 1: - pfull_str = f"{pfull[0]:~.0f}".replace(" ","") + pfull_str = f"{pfull[0]:~.0f}".replace(" ", "") ofile = f"{args.statevariable}_{pfull_str}.png" else: - ofile = f"{args.statevariable}_{args.fill}.png" - if args.shp: + ofile = f"{args.statevariable}_{args.tendencytype}.png" + if args.shp is not None: shp = shp.rstrip("/") # Add shapefile name to output filename shapename = os.path.basename(shp) diff --git a/metplotpy/contributed/fv3_physics_tend/vert_profile_fv3.py b/metplotpy/contributed/fv3_physics_tend/vert_profile_fv3.py index 19d4534f..c5948e0a 100644 --- a/metplotpy/contributed/fv3_physics_tend/vert_profile_fv3.py +++ b/metplotpy/contributed/fv3_physics_tend/vert_profile_fv3.py @@ -1,65 +1,77 @@ +""" Vertical profile of tendencies """ import argparse -import cartopy import datetime import logging +import os +import cartopy import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator from metpy.units import units -import numpy as np -import os import pandas as pd -import pdb -from . import physics_tend -import sys import xarray import yaml - -""" -Vertical profile of tendencies of t, q, u, or v from physics parameterizations, dynamics (non-physics), their total, and residual. -Total change is the actual change in state variable from first time to last time. 
Total change differs from cumulative change -attributed to physics and non-physics tendencies when residual is not zero. -""" +from . import physics_tend def parse_args(): + """ + parse command line arguments + """ # =============Arguments=================== - parser = argparse.ArgumentParser(description = "Vertical profile of FV3 diagnostic tendencies", formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Vertical profile of FV3 diagnostic tendencies", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) # ==========Mandatory Arguments=================== - parser.add_argument("config", type=argparse.FileType('r'), help="yaml configuration file") - parser.add_argument("historyfile", type=argparse.FileType("r"), help="FV3 history file") - parser.add_argument("gridfile", type=argparse.FileType("r"), help="FV3 grid spec file") - parser.add_argument("statevariable", type=str, help="moisture, temperature, or wind component variable name") + parser.add_argument("config", help="yaml configuration file") + parser.add_argument("historyfile", help="FV3 history file") + parser.add_argument("gridfile", help="FV3 grid spec file") + parser.add_argument( + "statevariable", help="moisture, temperature, or wind component variable name") # ==========Optional Arguments=================== parser.add_argument("-d", "--debug", action='store_true') - parser.add_argument("--nofineprint", action='store_true', help="Don't add metadata and created by date (for comparing images)") - parser.add_argument("-o", "--ofile", type=str, help="name of output image file") - parser.add_argument("--resid", action="store_true", help="calculate residual") - parser.add_argument("-s", "--shp", type=str, default=None, help="shape file directory for mask") - parser.add_argument("--subtract", type=argparse.FileType("r"), help="FV3 history file to subtract") - parser.add_argument("-t", "--twindow", type=int, default=3, help="time window in hours") - parser.add_argument("-v", "--validtime", type=lambda x:pd.to_datetime(x), help="valid time") + parser.add_argument("--nofineprint", action='store_true', + help="Don't add metadata and created by date (for comparing images)") + parser.add_argument("-o", "--ofile", help="name of output image file") + parser.add_argument("--resid", action="store_true", + help="calculate residual") + parser.add_argument("-s", "--shp", help="shape file directory for mask") + parser.add_argument("--subtract", help="FV3 history file to subtract") + parser.add_argument("-t", "--twindow", type=float, + default=3, help="time window in hours") + parser.add_argument("-v", "--validtime", help="valid time") + parser.add_argument("--xmin", type=float, help="x-axis minimum") + parser.add_argument("--xmax", type=float, help="x-axis maximum") args = parser.parse_args() return args def main(): + """ + Vertical profile of tendencies of t, q, u, or v from physics parameterizations, + dynamics (non-physics), the combination of all tendencies (physics and non-physics), + the actual tendency, and the residual. Residual is the sum of all tendencies minus the + actual tendency. 
+ """ args = parse_args() - gfile = args.gridfile - ifile = args.historyfile - variable = args.statevariable - config = args.config - debug = args.debug - resid = args.resid - nofineprint= args.nofineprint - ofile = args.ofile - shp = args.shp - subtract = args.subtract - twindow = datetime.timedelta(hours = args.twindow) - validtime = args.validtime + gfile = args.gridfile + ifile = args.historyfile + variable = args.statevariable + config = args.config + nofineprint = args.nofineprint + ofile = args.ofile + shp = args.shp + subtract = args.subtract + twindow = datetime.timedelta(hours=args.twindow) + twindow_quantity = twindow.total_seconds() * units.seconds + validtime = pd.to_datetime(args.validtime) + xmin = args.xmin + xmax = args.xmax level = logging.INFO - if debug: level = logging.DEBUG - logging.basicConfig(format='%(asctime)s - %(message)s', level=level) # prepend log message with time + if args.debug: + level = logging.DEBUG + # prepend log message with time + logging.basicConfig(format='%(asctime)s - %(message)s', level=level) logging.debug(args) # Output filename. @@ -75,136 +87,173 @@ def main(): ofile = os.path.realpath(args.ofile) odir = os.path.dirname(ofile) if not os.path.exists(odir): - logging.info(f"output directory {odir} does not exist. Creating it") + logging.info( + f"output directory {odir} does not exist. Creating it") os.mkdir(odir) - logging.info(f"output filename={ofile}") - + logging.debug("output filename=%s", ofile) # Reload fv3 in case user specifies a custom --config file - fv3 = yaml.load(open(config.name), Loader=yaml.FullLoader) + fv3 = yaml.load(open(config, encoding="utf8"), Loader=yaml.FullLoader) # Read lat/lon/area from gfile logging.debug(f"read lat/lon/area from {gfile}") - gds = xarray.open_dataset(gfile.name) + gds = xarray.open_dataset(gfile) lont = gds[fv3["lon_name"]] latt = gds[fv3["lat_name"]] area = gds["area"] # Open input file - logging.debug(f"About to open {ifile}") - fv3ds = xarray.open_dataset(ifile.name) + logging.debug("open %s", ifile) + fv3ds = xarray.open_dataset(ifile) + + if subtract: + logging.info("subtracting %s", subtract) + with xarray.set_options(keep_attrs=True): + fv3ds -= xarray.open_dataset(subtract) + datetimeindex = fv3ds.indexes['time'] if hasattr(datetimeindex, "to_datetimeindex"): - # Convert from CFTime to pandas datetime. I get a warning CFTimeIndex from non-standard calendar 'julian'. Maybe history file should be saved with standard calendar. + # Convert from CFTime to pandas datetime or get warning + # CFTimeIndex from non-standard calendar 'julian'. + # Maybe history file should be saved with standard calendar. # To turn off warning, set unsafe=True. datetimeindex = datetimeindex.to_datetimeindex(unsafe=True) + ragged_times = datetimeindex != datetimeindex.round('1ms') + if any(ragged_times): + logging.info( + f"round times to nearest millisec. 
before: {datetimeindex[ragged_times].values}") + datetimeindex = datetimeindex.round('1ms') + logging.info(f"after: {datetimeindex[ragged_times].values}") fv3ds['time'] = datetimeindex - if subtract: - logging.debug(f"subtracting {subtract.name}") - with xarray.set_options(keep_attrs=True): - fv3ds -= xarray.open_dataset(subtract.name) - fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) # lont and latt used by pcolorfill() - tendency_vars = fv3["tendency_varnames"][variable] # list of tendency variable names for requested state variable - fv3ds = physics_tend.add_time0(fv3ds, variable, fv3) - tendencies = fv3ds[tendency_vars] # subset of original Dataset + # lont and latt used by pcolorfill() + fv3ds = fv3ds.assign_coords(lont=lont, latt=latt) if validtime is None: - logging.debug("validtime not provided on command line, so use latest time in history file.") validtime = fv3ds.time.values[-1] validtime = pd.to_datetime(validtime) + logging.info( + "validtime not provided on command line. Using last time in history file %s.", + validtime) time0 = validtime - twindow - time1 = time0 + datetime.timedelta(hours=1) - logging.info(f"Sum tendencies {time1}-{validtime}") - tindex = dict(time=slice(time1, validtime)) # slice of time from hour after time0 through validtime - tendencies_avg = tendencies.sel(tindex).mean(dim="time") # average tendencies in time - - # Dynamics (nophys) tendency is not reset every hour. Just calculate change from time0 to validtime. - nophys_var = [x for x in tendency_vars if x.endswith("_nophys")] - assert len(nophys_var) == 1 - nophys_var = nophys_var[0] # we don't want a 1-element list; we want a string. So that tendencies[nophys_var] is a DataArray, not a Dataset. - logging.info(f"Subtract nophys tendency at {time0} from {validtime}") - nophys_delta = tendencies[nophys_var].sel(time=validtime) - tendencies[nophys_var].sel(time=time0) - tendencies_avg[nophys_var] = nophys_delta / args.twindow - - - # Restore units after .mean() removed them. Copy units from 1st tendency variable. - tendency_units = units.parse_expression(fv3ds[tendency_vars[0]].units) - logging.debug(f"restoring {tendency_units} units after .mean() method removed them.") - tendencies_avg *= tendency_units - for da in tendencies_avg: - tendencies_avg[da] = tendencies_avg[da].metpy.convert_units("K/hour") - long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] # Make list of long_names before .to_array() loses them. - - # Remove characters up to and including 1st underscore (e.g. du3dt_) in DataArray name. - # for example dt3dt_pbl -> pbl - name_dict = {da : "_".join(da.split("_")[1:]) for da in tendencies_avg.data_vars} + assert time0 in fv3ds.time, (f"time0 {time0} not in history file. Closest is " + f"{fv3ds.time.sel(time=time0, method='nearest').time.data}") + + # list of tendency variable names for requested state variable + tendency_vars = fv3["tendency_varnames"][variable] + tendencies = fv3ds[tendency_vars] # subset of original Dataset + # convert DataArrays to Quantities to protect units. DataArray.mean drops units attribute. + tendencies = tendencies.metpy.quantify() + + # Define time slice starting with time-after-time0 and ending with validtime. + # We use the time *after* time0 because the time range corresponding to the tendency + # output is the period immediately prior to the tendency timestamp. + # That way, slice(time_after_time0, validtime) has a time range of [time0,validtime]. 
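# --- editor's sketch (illustrative only, not part of this patch) ---
# Unit handling used just below: quantify() turns the "units" attribute into a
# pint Quantity so reductions (mean, sum, weighted mean) cannot silently drop
# it; dequantify() later moves the units back into attrs, where xarray plotting
# picks them up for axis labels.  Hypothetical one-variable example:
import xarray as xr
import metpy.xarray  # noqa: F401  (importing registers the .metpy accessor)

da = xr.DataArray([1.0, 2.0, 4.0], dims="x", attrs={"units": "K/s"})
quantified = da.metpy.quantify()     # data becomes a pint Quantity in K/s
mean = quantified.mean()             # units survive the reduction
plain = mean.metpy.dequantify()      # numpy again; units back in plain.attrs["units"]
# --- end sketch ---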
+ idx_first_time_after_time0 = (fv3ds.time > time0).argmax() + time_after_time0 = fv3ds.time[idx_first_time_after_time0] + tindex = {"time": slice(time_after_time0, validtime)} + logging.debug( + "Time-weighted mean tendencies for time index slice %s", tindex) + timeweights = fv3ds.time.diff("time").sel(tindex) + time_weighted_tendencies = tendencies.sel(tindex) * timeweights + tendencies_avg = time_weighted_tendencies.sum( + dim="time") / timeweights.sum(dim="time") + + # Make list of long_names before .to_array() loses them. + long_names = [fv3ds[da].attrs["long_name"] for da in tendencies_avg] + + # Keep characters after final underscore. The first part is redundant. + # for example dtend_u_pbl -> pbl + name_dict = {da: "_".join(da.split("_")[-1:]) + for da in tendencies_avg.data_vars} + logging.debug("rename %s", name_dict) tendencies_avg = tendencies_avg.rename(name_dict) # Stack variables along new tendency dimension of new DataArray. tendency_dim = f"{variable} tendency" - tendencies_avg = tendencies_avg.to_array(dim=tendency_dim) - # Assign long_names to a new DataArray coordinate. It will have the same shape as tendency dimension. - tendencies_avg = tendencies_avg.assign_coords({"long_name":(tendency_dim,long_names)}) - - logging.info(f"calculate actual change in {variable}") - state_variable = fv3ds[variable].metpy.quantify() # Tried metpy.quantify() with open_dataset, but pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry - dstate_variable = state_variable.sel(time = validtime) - state_variable.sel(time = time0) - dstate_variable = dstate_variable.assign_coords(time=validtime) - dstate_variable.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" - - # Add all tendencies together and subtract actual rate of change in state variable. - # This is residual. - total = tendencies_avg.sum(dim=tendency_dim) - twindow_quantity = twindow.total_seconds() * units.seconds - resid = total - dstate_variable/twindow_quantity + tendencies_avg = tendencies_avg.to_array(dim=tendency_dim, name=tendency_dim) + # Assign long_names to a new DataArray coordinate. + # It will have the same shape as tendency dimension. + tendencies_avg = tendencies_avg.assign_coords( + {"long_name": (tendency_dim, long_names)}) + + logging.info("calculate actual change in %s", variable) + # Tried metpy.quantify() with open_dataset, but + # pint.errors.UndefinedUnitError: 'dBz' is not defined in the unit registry + state_variable = fv3ds[variable].metpy.quantify() + actual_change = state_variable.sel(time=validtime) - state_variable.sel( + time=time0, method="nearest", tolerance=datetime.timedelta(milliseconds=1)) + actual_change = actual_change.assign_coords(time=validtime) + actual_change.attrs["long_name"] = f"actual change in {state_variable.attrs['long_name']}" + + # Sum all tendencies (physics and non-physics) + all_tendencies = tendencies_avg.sum(dim=tendency_dim) + + # Subtract physics tendency variable if it was in tendency_vars. Don't want to double-count. + phys_var = [x for x in tendency_vars if x.endswith("_phys")] + if phys_var: + logging.info("subtracting 'phys' tendency variable " + "from all_tendencies to avoid double-counting") + # use .data to avoid re-introducing tendency coordinate + all_tendencies = all_tendencies - \ + tendencies_avg.sel({tendency_dim: "phys"}).data + + # Calculate actual tendency of state variable. 
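# --- editor's sketch (illustrative only, not part of this patch) ---
# The actual tendency formed just below divides the change in the state
# variable (selected above with method="nearest" and a 1 ms tolerance, so the
# millisecond-rounded time axis still matches) by the window length.
# Hypothetical two-time example with plain floats instead of pint quantities:
import datetime
import pandas as pd
import xarray as xr

times = pd.to_datetime(["2019-05-04 12:00:00", "2019-05-04 15:00:00"])
temp = xr.DataArray([280.0, 283.0], coords={"time": times}, dims="time")

validtime, twindow = times[-1], datetime.timedelta(hours=3)
time0 = validtime - twindow
actual_change = temp.sel(time=validtime) - temp.sel(
    time=time0, method="nearest", tolerance=datetime.timedelta(milliseconds=1))
actual_tendency = actual_change / twindow.total_seconds()    # 3 K / 10800 s
# --- end sketch ---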
+ actual_tendency = actual_change / twindow_quantity + logging.info( + "subtract actual tendency from all_tendencies to get residual") + resid = all_tendencies - actual_tendency da2plot = tendencies_avg - if resid is not None: - # Add total and resid DataArrays to tendency_dim. - total = total.expand_dims({tendency_dim:["total"]}).assign_coords(long_name="sum of tendencies") - resid = resid.expand_dims({tendency_dim:["resid"]}).assign_coords(long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") - da2plot = xarray.concat([da2plot, total, resid], dim=tendency_dim) - + if args.resid: + # Concatenate all_tendencies, actual_tendency, and resid DataArrays. + # Give them a name and long_name along tendency_dim. + all_tendencies = all_tendencies.expand_dims({tendency_dim: ["all"]}).assign_coords( + long_name="sum of tendencies") + actual_tendency = actual_tendency.expand_dims({tendency_dim: ["actual"]}).assign_coords( + long_name=f"actual rate of change of {variable}") + resid = resid.expand_dims({tendency_dim: ["resid"]}).assign_coords( + long_name=f"sum of tendencies - actual rate of change of {variable} (residual)") + da2plot = xarray.concat( + [da2plot, all_tendencies, actual_tendency, resid], dim=tendency_dim) # Mask points outside shape. - if shp: - # mask points outside shape - mask = physics_tend.pts_in_shp(latt.values, lont.values, shp, debug=debug) # Use .values to avoid AttributeError: 'DataArray' object has no attribute 'flatten' - mask = xarray.DataArray(mask, coords=[da2plot.grid_yt, da2plot.grid_xt]) - da2plot = da2plot.where(mask, drop=True) - area = area.where(mask).fillna(0) - - totalarea = area.metpy.convert_units("km**2").sum() - - - logging.info(f"area-weighted spatial average") + if shp is not None: + # Use .values to avoid AttributeError: 'DataArray' object has no attribute 'flatten' + mask = physics_tend.pts_in_shp( + latt.values, lont.values, shp) + mask = xarray.DataArray( + mask, coords=[da2plot.grid_yt, da2plot.grid_xt]) + da2plot = da2plot.where(mask) + + logging.info("area-weighted spatial average") da2plot = da2plot.weighted(area).mean(area.dims) - + # Put units in attributes so they show up in xlabel. + # dequantify after area-weighted mean to preserve units. + da2plot = da2plot.metpy.dequantify() logging.info("creating figure") - fig, ax = plt.subplots() - plt.subplots_adjust(bottom=0.18) # add space at bottom for fine print - ax.invert_yaxis() # pressure increases from top to bottom + _, ax = plt.subplots() + plt.subplots_adjust(bottom=0.18) # add space at bottom for fine print + ax.invert_yaxis() # pressure increases from top to bottom ax.grid(visible=True, color="grey", alpha=0.5, lw=0.5) ax.yaxis.set_major_locator(MultipleLocator(100)) ax.yaxis.set_minor_locator(MultipleLocator(25)) ax.grid(which='minor', alpha=0.3, lw=0.4) - # Put units in attributes so they show up in xlabel. - da2plot = da2plot.metpy.dequantify() - logging.info("plot area-weighted spatial average...") - lines = da2plot.plot.line(y="pfull", ax=ax, hue=tendency_dim) + lines = da2plot.plot.line( + y="pfull", ax=ax, xlim=(xmin, xmax), hue=tendency_dim) - if resid is not None: # resid might have been turned from a Boolean scalar variable to a DataArray. - # Add special marker to dstate_variable and residual lines. - # DataArray plot legend handles differ from the plot lines, for some reason. So if you + if args.resid: + # Add special marker to actual_change and residual lines. + # DataArray plot legend handles differ from the plot lines, for some reason. 
So if you # change the style of a line later, it is not automatically changed in the legend. - # zip d{variable}, resid line and their respective legend handles together and change their style together. + # zip d{variable}, resid line and their respective legend handles together and change + # their style together. # [-2:] means take last two elements of da2plot. - special_lines = list(zip(lines, ax.get_legend().legendHandles))[-2:] + special_lines = list(zip(lines, ax.get_legend().legend_handles))[-2:] special_marker = 'o' special_marker_size = 3 for line, leghandle in special_lines: @@ -217,36 +266,38 @@ def main(): title = f'{time0}-{validtime} ({twindow_quantity.to("hours"):~} time window)' ax.set_title(title, wrap=True) - if shp: + if shp is not None: # Locate region of interest on conus map background. Put in inset. - projection = cartopy.crs.LambertConformal(central_longitude=-97.5, central_latitude=fv3["standard_parallel"]) - ax_inset = plt.gcf().add_axes([.7, .001, .19, .13], projection=projection) + projection = cartopy.crs.LambertConformal( + central_longitude=-97.5, central_latitude=fv3["standard_parallel"]) + ax_inset = plt.gcf().add_axes( + [.7, .001, .19, .13], projection=projection) # astype(int) to avoid TypeError: numpy boolean subtract - cbar_kwargs = dict(ticks=[0.25,0.75],shrink=0.6) - pc = mask.assign_coords(lont=lont, latt=latt).astype(int).plot.pcolormesh(ax=ax_inset, x="lont", y="latt", infer_intervals=True, - transform=cartopy.crs.PlateCarree(), cmap=plt.cm.get_cmap('cool',2), add_labels=False, cbar_kwargs=cbar_kwargs) - pc.colorbar.ax.set_yticklabels(["masked","valid"], fontsize='xx-small') - pc.colorbar.outline.set_visible(False) + cbar_kwargs = {"ticks":[0.25, 0.75], "shrink":0.6} + pcm = mask.assign_coords(lont=lont, latt=latt).astype(int).plot.pcolormesh( + ax=ax_inset, x="lont", y="latt", infer_intervals=True, + transform=cartopy.crs.PlateCarree(), cmap=plt.colormaps['cool'], + add_labels=False, cbar_kwargs=cbar_kwargs) + pcm.colorbar.ax.set_yticklabels( + ["masked", "valid"], fontsize='xx-small') + pcm.colorbar.outline.set_visible(False) physics_tend.add_conus_features(ax_inset) extent = fv3["extent"] ax_inset.set_extent(extent) - # Annotate figure with details about figure creation. 
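# Illustrative sketch of the area-weighted spatial average used above, on a tiny
# synthetic 2x2 grid (dimension names mirror the FV3 grid; values are made up).
import numpy as np
import xarray as xr

da = xr.DataArray([[1.0, 2.0], [3.0, 4.0]], dims=("grid_yt", "grid_xt"))
area = xr.DataArray([[1.0, 1.0], [2.0, 2.0]], dims=("grid_yt", "grid_xt"))

# Each grid point contributes in proportion to its cell area.
avg = da.weighted(area).mean(area.dims)
print(float(avg))   # (1*1 + 2*1 + 3*2 + 4*2) / 6 = 2.833...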
- fineprint = f"history: {os.path.realpath(ifile.name)}" - if subtract: - fineprint += f"\nsubtract: {os.path.realpath(subtract.name)}" - fineprint += f"\ngrid_spec: {os.path.realpath(gfile.name)}" - if shp: fineprint += f"\nmask: {shp}" - fineprint += f"\ntotal area: {totalarea.data:~.0f}" - fineprint += f"\ncreated {datetime.datetime.now(tz=None)}" + # Annotate figure with args namespace and timestamp + fineprint = f"{args} " + fineprint += f"created {datetime.datetime.now(tz=None)}" if nofineprint: - logging.info(fineprint) + logging.debug(fineprint) else: - fineprint_obj = plt.annotate(text=fineprint, xy=(1,1), xycoords='figure pixels', fontsize=5) - + logging.debug("add fineprint to image") + plt.figtext(0, 0, fineprint, fontsize='xx-small', + va="bottom", wrap=True) plt.savefig(ofile, dpi=fv3["dpi"]) - logging.info(f'created {os.path.realpath(ofile)}') + logging.info('created %s', os.path.realpath(ofile)) + if __name__ == "__main__": main() diff --git a/metplotpy/contributed/stratosphere_diagnostics/stratosphere_plots.py b/metplotpy/contributed/stratosphere_diagnostics/stratosphere_plots.py index 491e67a3..d95099c3 100644 --- a/metplotpy/contributed/stratosphere_diagnostics/stratosphere_plots.py +++ b/metplotpy/contributed/stratosphere_diagnostics/stratosphere_plots.py @@ -1,5 +1,6 @@ import xarray as xr # http://xarray.pydata.org/ import numpy as np +import pandas as pd import cmocean import matplotlib from matplotlib import cm @@ -55,3 +56,143 @@ def plot_polar_contour(leads,levels,pdata,outfile,ptitle,plevs,ctable): plt.savefig(outfile) plt.close() + +def plot_qbo_phase_circuits(inits,periods,rean_qbo_pcs,rfcst_qbo_pcs,outfile): + + fig = plt.figure(1) + + for i,init in enumerate(inits): + + dates = pd.date_range(init, periods=periods, freq="1D") + #Remove leap days + dates = dates[(dates.month != 2) | (dates.day != 29)] + ax = fig.add_subplot(4,7,i+1) + + # plot the QBO phase space time series + plt_handles = [] + + # green/red dots are the start/end of the fcst period + plt.plot(rfcst_qbo_pcs.sel(mode=0, time=dates), + rfcst_qbo_pcs.sel(mode=1, time=dates), linewidth=1.5) + plt.plot(rfcst_qbo_pcs.sel(mode=0, time=dates[0]), + rfcst_qbo_pcs.sel(mode=1, time=dates[0]), marker="o", color="green") + plt.plot(rfcst_qbo_pcs.sel(mode=0, time=dates[-1]), + rfcst_qbo_pcs.sel(mode=1, time=dates[-1]), marker="o", color="red") + + # plot same thing from reanalysis in black + plt.plot(rean_qbo_pcs.sel(mode=0, time=dates), + rean_qbo_pcs.sel(mode=1, time=dates), color='black', linewidth=1.5) + plt.plot(rean_qbo_pcs.sel(mode=0, time=dates[0]), + rean_qbo_pcs.sel(mode=1, time=dates[0]), marker="o", color="green") + plt.plot(rean_qbo_pcs.sel(mode=0, time=dates[-1]), + rean_qbo_pcs.sel(mode=1, time=dates[-1]), marker="o", color="red") + plt.xlim((-2.5,2.5)) + plt.ylim((-2.5,2.5)) + plt.gca().set_aspect('equal') + plt.title('Start: '+init.strftime('%Y-%m-%d'), fontsize=22) + + # plot the QBO phase lines + amp = 2.5 + ax.plot([-amp,amp],[-amp,amp], linewidth=0.5, linestyle='--', color='k', zorder=0) + ax.plot([-amp,amp],[amp,-amp], linewidth=0.5, linestyle='--', color='k', zorder=0) + ax.plot([-amp,amp],[0,0], linewidth=0.5, linestyle='--', color='k', zorder=0) + ax.plot([0,0],[-amp,amp], linewidth=0.5, linestyle='--', color='k', zorder=0) + ax.set_xticks(np.arange(-2.,2.1,1)) + ax.set_yticks(np.arange(-2.,2.1,1)) + + + fig.set_size_inches(24,16) + fig.subplots_adjust(hspace = 0.25, left=0.07, right=0.92, top=0.92, bottom=0.07) + plt.savefig(outfile, dpi=150, facecolor='white', bbox_inches="tight") 
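# Hypothetical usage of the new plot_qbo_phase_circuits() helper, assuming the module
# is importable after this change. The principal-component data below is synthetic and
# only meant to show the expected shapes: a "mode" dimension with values 0 and 1, and a
# daily "time" dimension covering each init period.
import numpy as np
import pandas as pd
import xarray as xr
from metplotpy.contributed.stratosphere_diagnostics.stratosphere_plots import plot_qbo_phase_circuits

time = pd.date_range("2018-01-01", "2018-03-31", freq="1D")
angles = np.linspace(0.0, 4.0 * np.pi, time.size)
pcs = xr.DataArray(np.stack([np.cos(angles), np.sin(angles)]),
                   coords={"mode": [0, 1], "time": time}, dims=("mode", "time"))

inits = [pd.Timestamp("2018-01-01"), pd.Timestamp("2018-02-01")]
plot_qbo_phase_circuits(inits, 30, pcs, 0.9 * pcs, "qbo_phase_circuits.png")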
+ + + +def plot_qbo_phase_space(rean_qbo_pcs,eofs,ptitle,outfile): + + from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition, mark_inset) + + fig = plt.figure(4) + + ax = fig.add_subplot(1,1,1) + ax.plot(rean_qbo_pcs.sel(mode=0), rean_qbo_pcs.sel(mode=1), alpha=0.75, linewidth=0.6) + ax.set_xlim((-2.5,2.5)) + ax.set_ylim((-2.5,2.5)) + ax.set_aspect('equal') + + amp = 2.5 + ax.plot([-amp,amp],[-amp,amp], linewidth=0.65, linestyle='--', color='k', zorder=0) + ax.plot([-amp,amp],[amp,-amp], linewidth=0.65, linestyle='--', color='k', zorder=0) + ax.plot([-amp,amp],[0,0], linewidth=0.65, linestyle='--', color='k', zorder=0) + ax.plot([0,0],[-amp,amp], linewidth=0.65, linestyle='--', color='k', zorder=0) + ax.set_xticks(np.arange(-2.,2.1,1)) + ax.set_yticks(np.arange(-2.,2.1,1)) + + ax.set_xlabel('EOF1', fontsize=20) + ax.set_ylabel('EOF2', fontsize=20) + ax.tick_params(which='major', axis='both', length=7, labelsize=18) + ax.grid(True,alpha=0.5) + + ## Plot idealized zonal wind profiles for different combinations of + ## EOF values + inset_axes = [] + axi = ax.inset_axes([2,-0.25,0.5,0.5], transform=ax.transData) + axi.plot(eofs.sel(mode=0), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([np.sqrt(2)+0.2, np.sqrt(2)+0.2, 0.5, 0.5], transform=ax.transData) + axi.plot(eofs.sel(mode=0)+eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([-0.25,2, 0.5, 0.5], transform=ax.transData) + axi.plot(eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([-np.sqrt(2)-0.2-0.5, np.sqrt(2)+0.2, 0.5, 0.5], transform=ax.transData) + axi.plot(-1*eofs.sel(mode=0)+eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([-2.5,-0.25, 0.5, 0.5], transform=ax.transData) + axi.plot(-1*eofs.sel(mode=0), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([-np.sqrt(2)-0.2-0.5, -np.sqrt(2)-0.2-0.5, 0.5, 0.5], transform=ax.transData) + axi.plot(-1*eofs.sel(mode=0)-eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([-0.25,-2.5, 0.5, 0.5], transform=ax.transData) + axi.plot(-1*eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + axi = ax.inset_axes([np.sqrt(2)+0.2, -np.sqrt(2)-0.2-0.5, 0.5, 0.5], transform=ax.transData) + axi.plot(eofs.sel(mode=0)-eofs.sel(mode=1), eofs.pres, linewidth=1.5) + inset_axes.append(axi) + + for axi in inset_axes: + axi.invert_yaxis() + axi.set_yscale('log') + axi.set_yticks([]) + axi.set_xticks([]) + axi.minorticks_off() + axi.set_xlim(-1.1,1.1) + axi.axvline(0, color='black') + axi.set_facecolor('lightgrey') + + ax.set_title(ptitle, fontsize=24, fontweight='semibold') + fig.set_size_inches(10,10) + + plt.savefig(outfile, dpi=150, facecolor='white') + + +def plot_u_timeseries(obs_dt,obs_u,fcst_dt,fcst_u,plot_title,outfile): + + fig = plt.figure(figsize=(16,8)) + ax = fig.add_subplot(1,1,1) + ax.plot(fcst_dt,fcst_u,'r',linewidth = 2,label='model') + ax.plot(obs_dt,obs_u,'b',linewidth = 2,label='obs') + ax.set_title(plot_title,fontsize=20) + ax.set_xlabel('Date',fontsize=14) + ax.set_ylabel('Zonal Mean U (m/s)',fontsize=14) + ax.xaxis.set_major_locator(mticker.MultipleLocator(15)) + plt.legend() + plt.savefig(outfile) + #plt.savefig(outfile,bbox_inches='tight') diff --git a/metplotpy/plots/bar/bar.py b/metplotpy/plots/bar/bar.py index f02042c9..cb881886 100644 --- a/metplotpy/plots/bar/bar.py +++ b/metplotpy/plots/bar/bar.py @@ -19,7 +19,6 @@ import 
pandas as pd import plotly.graph_objects as go -import yaml from plotly.graph_objects import Figure from plotly.subplots import make_subplots @@ -53,11 +52,11 @@ def __init__(self, parameters: dict) -> None: # the # config file that represents the BasePlot object (Bar). self.config_obj = BarConfig(self.parameters) - self.bar_logger = self.config_obj.logger - self.bar_logger.info(f"Start bar plot: {datetime.now()}") + self.logger = self.config_obj.logger + self.logger.info(f"Start bar plot: {datetime.now()}") # Check that we have all the necessary settings for each series - self.bar_logger.info(f"Consistency checking of config settings for colors, " - f"legends, etc.") + self.logger.info("Consistency checking of config settings for colors, " + "legends, etc.") is_config_consistent = self.config_obj._config_consistency_check() if not is_config_consistent: value_error_msg = ("ValueError: The number of series defined by series_val_1 and " @@ -66,20 +65,19 @@ def __init__(self, parameters: dict) -> None: "check the number of your configuration file's " "plot_i, plot_disp, series_order, user_legend, show_legend and " "colors settings.") - self.bar_logger.error(value_error_msg) + self.logger.error(value_error_msg) raise ValueError(value_error_msg) - # Read in input data, location specified in config file - self.bar_logger.info(f"Begin reading input data: {datetime.now()}") + self.logger.info(f"Begin reading input data: {datetime.now()}") self.input_df = self._read_input_data() # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.bar_logger.info(f"Performing event equalization: {datetime.now()}") + self.logger.info(f"Performing event equalization: {datetime.now()}") self.input_df = calc_util.perform_event_equalization(self.parameters, self.input_df) - self.bar_logger.info(f"End event equalization: {datetime.now()}") + self.logger.info(f"End event equalization: {datetime.now()}") # Create a list of series objects. # Each series object contains all the necessary information for plotting, @@ -91,9 +89,9 @@ def __init__(self, parameters: dict) -> None: # Need to have a self.figure that we can pass along to # the methods in base_plot.py (BasePlot class methods) to # create binary versions of the plot. - self.bar_logger.info(f"Begin creating the figure: {datetime.now()}") + self.logger.info(f"Begin creating the figure: {datetime.now()}") self._create_figure() - self.bar_logger.info(f"End creating the figure: {datetime.now()}") + self.logger.info(f"End creating the figure: {datetime.now()}") def __repr__(self): """ Implement repr which can be useful for debugging this @@ -114,7 +112,7 @@ def _read_input_data(self): Returns: """ - self.bar_logger.info(f"Finished reading input data: " + self.logger.info(f"Finished reading input data: " f"{datetime.now()}") return pd.read_csv(self.config_obj.parameters['stat_input'], sep='\t', header='infer', float_precision='round_trip') @@ -516,28 +514,7 @@ def main(config_filename=None): @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
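# Assumed rationale for the self.*_logger -> self.logger renames throughout this diff:
# with a single attribute name, logging calls in the shared BasePlot code (and any helper
# that only receives a plot object) work for every subclass. Minimal sketch:
import logging

class BasePlotSketch:
    def show_in_browser(self):
        # base-class code can log without knowing the concrete plot type
        self.logger.error("Figure not created. Nothing to show in the browser.")

class BarSketch(BasePlotSketch):
    def __init__(self):
        self.logger = logging.getLogger("bar")   # previously self.bar_logger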
- if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Bar(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.bar_logger.info(f"Finished bar plot at {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, Bar) if __name__ == "__main__": diff --git a/metplotpy/plots/base_plot.py b/metplotpy/plots/base_plot.py index 106bf3a1..de0458a2 100644 --- a/metplotpy/plots/base_plot.py +++ b/metplotpy/plots/base_plot.py @@ -74,7 +74,6 @@ def __init__(self, parameters, default_conf_filename): self.remove_file() self.config_obj = Config(self.parameters) - def get_image_format(self): """Reads the image format type from user provided image name. Uses file extension as a type. If the file extension is not valid - @@ -340,8 +339,6 @@ def save_to_file(self): Returns: """ - - image_name = self.get_config_value('plot_filename') # Suppress deprecation warnings from third-party packages that are not in our control. @@ -388,8 +385,8 @@ def show_in_browser(self): if self.figure: self.figure.show() else: - self.logger.error(f" Figure not created. Nothing to show in the " - f"browser. ") + self.logger.error(" Figure not created. Nothing to show in the " + "browser. ") print("Oops! The figure was not created. Can't show") def _add_lines(self, config_obj: Config, x_points_index: Union[list, None] = None) -> None: diff --git a/metplotpy/plots/box/box.py b/metplotpy/plots/box/box.py index bff95601..6c41762a 100644 --- a/metplotpy/plots/box/box.py +++ b/metplotpy/plots/box/box.py @@ -20,7 +20,6 @@ from typing import Union from operator import add from itertools import chain -import yaml import pandas as pd import plotly.graph_objects as go @@ -57,13 +56,13 @@ def __init__(self, parameters): # config file that represents the BasePlot object (Box). self.config_obj = BoxConfig(self.parameters) - self.box_logger = self.config_obj.logger + self.logger = self.config_obj.logger - self.box_logger.info(f"Start bar plot at {datetime.now()}") + self.logger.info(f"Start bar plot at {datetime.now()}") # Check that we have all the necessary settings for each series is_config_consistent = self.config_obj._config_consistency_check() - self.box_logger.info("Checking consistency of user_legends, colors, etc...") + self.logger.info("Checking consistency of user_legends, colors, etc...") if not is_config_consistent: raise ValueError("The number of series defined by series_val_1/2 and derived curves is" " inconsistent with the number of settings" @@ -77,9 +76,9 @@ def __init__(self, parameters): # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.box_logger.info(f"Start event equalization: {datetime.now()}") + self.logger.info(f"Start event equalization: {datetime.now()}") self.input_df = calc_util.perform_event_equalization(self.parameters, self.input_df) - self.box_logger.info(f"Finish event equalization: {datetime.now()}") + self.logger.info(f"Finish event equalization: {datetime.now()}") # Create a list of series objects. 
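# util.make_plot() is defined elsewhere in this change set (metplotpy/plots/util.py) and
# its implementation is not shown in this diff. The sketch below only illustrates the
# factory pattern it replaces, reconstructed from the boilerplate removed from each
# module's main(); the name, signature, and error handling here are assumptions.
import yaml

def make_plot_sketch(config_file, plot_class):
    # Load the custom YAML config that overrides the plot type's defaults.
    with open(config_file, "r") as stream:
        params = yaml.load(stream, Loader=yaml.FullLoader)
    try:
        plot = plot_class(params)          # e.g. Bar, Box, Contour, Eclv, EnsSs, ...
        plot.save_to_file()
        plot.write_html()
        plot.write_output_file()
    except ValueError as val_er:
        print(val_er)

# Each plot module's main() then reduces to a single call such as
# util.make_plot(config_filename, Bar). A companion util.get_params() appears where
# main() needs the parsed config dict directly (prob_hist, histogram_2d, hovmoeller).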
# Each series object contains all the necessary information for plotting, @@ -129,7 +128,7 @@ def _create_series(self, input_data): """ - self.box_logger.info(f"Begin generating series objects: " + self.logger.info(f"Begin generating series objects: " f"{datetime.now()}") series_list = [] @@ -173,14 +172,14 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.box_logger.info(f"End generating series objects: " + self.logger.info(f"End generating series objects: " f"{datetime.now()}") return series_list def _create_figure(self): """ Create a box plot from default and custom parameters""" - self.box_logger.info(f"Begin creating the figure: " + self.logger.info(f"Begin creating the figure: " f"{datetime.now()}") self.figure = self._create_layout() self._add_xaxis() @@ -230,7 +229,7 @@ def _create_figure(self): self.figure.update_layout(boxmode='group') - self.box_logger.info(f"End creating the figure: " + self.logger.info(f"End creating the figure: " f"{datetime.now()}") def _draw_series(self, series: BoxSeries) -> None: @@ -240,7 +239,7 @@ def _draw_series(self, series: BoxSeries) -> None: :param series: Line series object with data and parameters """ - self.box_logger.info(f"Begin drawing the boxes on the plot for " + self.logger.info(f"Begin drawing the boxes on the plot for " f"{series.series_name}: " f"{datetime.now()}") # defaults markers and colors for the regular box plot @@ -284,7 +283,7 @@ def _draw_series(self, series: BoxSeries) -> None: secondary_y=series.y_axis != 1 ) - self.box_logger.info(f"End drawing the boxes on the plot: " + self.logger.info(f"End drawing the boxes on the plot: " f"{datetime.now()}") @staticmethod @@ -299,7 +298,7 @@ def _find_min_max(series: BoxSeries, yaxis_min: Union[float, None], :param yaxis_max: previously calculated max value :return: a tuple with calculated min/max """ - self.box_logger.info(f"Begin finding min and max CI values: " + self.logger.info(f"Begin finding min and max CI values: " f"{datetime.now()}") # calculate series upper and lower limits of CIs indexes = range(len(series.series_points['dbl_med'])) @@ -311,7 +310,7 @@ def _find_min_max(series: BoxSeries, yaxis_min: Union[float, None], if yaxis_min is None or yaxis_max is None: return min(low_range), max(upper_range) - self.box_logger.info(f"End finding min and max CI values: " + self.logger.info(f"End finding min and max CI values: " f"{datetime.now()}") return min(chain([yaxis_min], low_range)), max(chain([yaxis_max], upper_range)) @@ -577,7 +576,7 @@ def write_html(self) -> None: # save html self.figure.write_html(html_name, include_plotlyjs=False) - self.box_logger.info(f"End writing HTML file: " + self.logger.info(f"End writing HTML file: " f"{datetime.now()}") def write_output_file(self) -> None: @@ -637,29 +636,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_box.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Box(docs) - plot.save_to_file() - #plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.box_logger.info(f"Finished box plot at {datetime.now()}") - except ValueError as ve: - print(ve) + util.make_plot(config_filename, Box) if __name__ == "__main__": diff --git a/metplotpy/plots/config/histogram_defaults.yml b/metplotpy/plots/config/histogram_defaults.yaml similarity index 100% rename from metplotpy/plots/config/histogram_defaults.yml rename to metplotpy/plots/config/histogram_defaults.yaml diff --git a/metplotpy/plots/config/tcmpr_defaults.yaml b/metplotpy/plots/config/tcmpr_defaults.yaml index d0be7620..8d7fb0d4 100644 --- a/metplotpy/plots/config/tcmpr_defaults.yaml +++ b/metplotpy/plots/config/tcmpr_defaults.yaml @@ -145,3 +145,5 @@ subtitle: '' prefix: baseline_file: ./hfip_baseline.dat column_info_file: ./plot_tcmpr_hdr.dat +is_tcdiag_linetype: False +connect_points: False diff --git a/metplotpy/plots/contour/contour.py b/metplotpy/plots/contour/contour.py index 08dfd191..353a1d72 100644 --- a/metplotpy/plots/contour/contour.py +++ b/metplotpy/plots/contour/contour.py @@ -18,7 +18,6 @@ import re import csv -import yaml import pandas as pd import plotly.graph_objects as go @@ -59,15 +58,14 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (Line). self.config_obj = ContourConfig(self.parameters) - - self.contour_logger = self.config_obj.logger - self.contour_logger.info(f"Start contour plot: {datetime.now()}") + self.logger = self.config_obj.logger + self.logger.info(f"Start contour plot: {datetime.now()}") # Check that we have all the necessary settings for each series - self.contour_logger.info("Consistency checking of config settings for colors,legends, etc.") + self.logger.info("Consistency checking of config settings for colors,legends, etc.") is_config_consistent = self.config_obj._config_consistency_check() if not is_config_consistent: - self.contour_logger.error("ValueError: The number of series defined by " + self.logger.error("ValueError: The number of series defined by " "series_val_1 is inconsistent with the number of " "settings required for describing each series. " "Please check the number of your configuration" @@ -81,15 +79,15 @@ def __init__(self, parameters: dict) -> None: " colors settings.") # Read in input data, location specified in config file - self.contour_logger.info(f"Begin reading input data: {datetime.now()}") + self.logger.info(f"Begin reading input data: {datetime.now()}") self.input_df = self._read_input_data() # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.contour_logger.info(f"Begin event equalization: {datetime.now()} ") + self.logger.info(f"Begin event equalization: {datetime.now()} ") self.input_df = calc_util.perform_event_equalization(self.parameters, self.input_df) - self.contour_logger.info(f"Event equalization complete: {datetime.now()}") + self.logger.info(f"Event equalization complete: {datetime.now()}") # Create a list of series objects. 
# Each series object contains all the necessary information for plotting, @@ -140,7 +138,7 @@ def _create_series(self, input_data): """ series_list = [] - self.contour_logger.info(f"Generating series objects: {datetime.now()}") + self.logger.info(f"Generating series objects: {datetime.now()}") # add series for y1 axis num_series_y1 = len(self.config_obj.get_series_y()) for i, name in enumerate(self.config_obj.get_series_y()): @@ -150,7 +148,7 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.contour_logger.info(f"Finished creating series objects: {datetime.now()}") + self.logger.info(f"Finished creating series objects: {datetime.now()}") return series_list def _create_figure(self): @@ -158,7 +156,7 @@ def _create_figure(self): Create a Contour plot from defaults and custom parameters """ - self.contour_logger.info(f"Creating the figure: {datetime.now()}") + self.logger.info(f"Creating the figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() self._add_xaxis() @@ -201,7 +199,7 @@ def _create_figure(self): 'ticks': "outside" } ) - self.contour_logger.info(f"Figure creating complete: {datetime.now()}") + self.logger.info(f"Figure creating complete: {datetime.now()}") def _draw_series(self, series: Series) -> None: """ @@ -209,7 +207,7 @@ def _draw_series(self, series: Series) -> None: :param series: Contour series object with data and parameters """ - self.contour_logger.info(f"Drawing the data: {datetime.now()}") + self.logger.info(f"Drawing the data: {datetime.now()}") line_width = self.config_obj.linewidth_list[series.idx] if self.config_obj.add_contour_overlay is False: line_width = 0 @@ -249,7 +247,7 @@ def _draw_series(self, series: Series) -> None: zauto=zauto ) ) - self.contour_logger.info(f"Finished drawing data: {datetime.now()}") + self.logger.info(f"Finished drawing data: {datetime.now()}") def _create_layout(self) -> Figure: """ @@ -390,7 +388,7 @@ def write_output_file(self) -> None: """ saves series points to the files """ - self.contour_logger.info(f"Writing output file: {datetime.now()}") + self.logger.info(f"Writing output file: {datetime.now()}") # Open file, name it based on the stat_input config setting, # (the input data file) except replace the .data @@ -425,7 +423,7 @@ def write_output_file(self) -> None: file.writelines('\n') file.writelines('\n') file.close() - self.contour_logger.info(f"Finished writing output file: {datetime.now()}") + self.logger.info(f"Finished writing output file: {datetime.now()}") def main(config_filename=None): @@ -437,29 +435,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_line_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Contour(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.contour_logger.info(f"Finished contour plot at {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, Contour) if __name__ == "__main__": diff --git a/metplotpy/plots/eclv/eclv.py b/metplotpy/plots/eclv/eclv.py index 6b0a7a55..5860b572 100644 --- a/metplotpy/plots/eclv/eclv.py +++ b/metplotpy/plots/eclv/eclv.py @@ -17,13 +17,13 @@ import csv from operator import add from typing import Union -import yaml import itertools import plotly.graph_objects as go from datetime import datetime from metcalcpy.event_equalize import event_equalize + from metplotpy.plots.base_plot import BasePlot from metplotpy.plots.constants import PLOTLY_AXIS_LINE_COLOR, PLOTLY_AXIS_LINE_WIDTH from metplotpy.plots.eclv.eclv_config import EclvConfig @@ -60,14 +60,14 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (Eclv). self.config_obj = EclvConfig(self.parameters) - self.eclv_logger = self.config_obj.logger + self.logger = self.config_obj.logger - self.eclv_logger.info(f"Start eclv plot: {datetime.now()}") + self.logger.info(f"Start eclv plot: {datetime.now()}") # Check that we have all the necessary settings for each series is_config_consistent = self.config_obj._config_consistency_check() if not is_config_consistent: - self.line_logger.error("ValueError: The number of series defined by " + self.logger.error("ValueError: The number of series defined by " "series_val_1 is " "inconsistent with the number of settings " "required for" @@ -85,12 +85,12 @@ def __init__(self, parameters: dict) -> None: " colors, and series_symbols settings.") # Read in input data, location specified in config file - self.eclv_logger.info(f"Begin reading input data: {datetime.now()}") + self.logger.info(f"Begin reading input data: {datetime.now()}") self.input_df = self._read_input_data() # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.line_logger.info(f"Performing event equalization: {datetime.now()}") + self.logger.info(f"Performing event equalization: {datetime.now()}") fix_vals_permuted_list = [] for key in self.config_obj.fixed_vars_vals_input: @@ -106,7 +106,7 @@ def __init__(self, parameters: dict) -> None: self.parameters['series_val_1'], fix_vals_keys, fix_vals_permuted_list, True, True) - self.eclv_logger.info(f"End even equalization: {datetime.now()}") + self.logger.info(f"End even equalization: {datetime.now()}") # Create a list of series objects. # Each series object contains all the necessary information for plotting, @@ -119,9 +119,9 @@ def __init__(self, parameters: dict) -> None: # Need to have a self.figure that we can pass along to # the methods in base_plot.py (BasePlot class methods) to # create binary versions of the plot. 
- self.eclv_logger.info(f"Begin creating the figure: {datetime.now()}") + self.logger.info(f"Begin creating the figure: {datetime.now()}") self._create_figure() - self.eclv_logger.info(f"End creating the figure: {datetime.now()}") + self.logger.info(f"End creating the figure: {datetime.now()}") def __repr__(self): """ Implement repr which can be useful for debugging this @@ -150,7 +150,7 @@ def _create_series(self, input_data): """ - self.eclv_logger.info(f"Begin creating series objects: {datetime.now()}") + self.logger.info(f"Begin creating series objects: {datetime.now()}") series_list = [] # add series for y1 axis @@ -163,7 +163,7 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.eclv_logger.info(f"Finished creating series objects:" + self.logger.info(f"Finished creating series objects:" f" {datetime.now()}") return series_list @@ -171,7 +171,7 @@ def _create_figure(self): """ Create a eclv plot from defaults and custom parameters """ - self.eclv_logger.info(f"Begin creating the figure: {datetime.now()}") + self.logger.info(f"Begin creating the figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() self._add_xaxis() @@ -227,7 +227,7 @@ def _create_figure(self): # add x2 axis self._add_x2axis(n_stats) - self.eclv_logger.info(f"Finished creating the figure: {datetime.now()}") + self.logger.info(f"Finished creating the figure: {datetime.now()}") def _add_x2axis(self, n_stats) -> None: """ @@ -287,7 +287,7 @@ def _draw_series(self, series: Series, :param series: Eclv series object with data and parameters :param x_points_index_adj: values for adjusting x-values position """ - self.eclv_logger.info(f"Begin drawing the series : {datetime.now()}") + self.logger.info(f"Begin drawing the series : {datetime.now()}") # pct series can have mote than one line for ind, series_points in enumerate(series.series_points): y_points = series_points['dbl_med'] @@ -334,7 +334,7 @@ def _draw_series(self, series: Series, secondary_y=False ) - self.eclv_logger.info(f"Finished drawing the series :" + self.logger.info(f"Finished drawing the series :" f" {datetime.now()}") def write_output_file(self) -> None: @@ -342,7 +342,7 @@ def write_output_file(self) -> None: saves series points to the files """ - self.eclv_logger.info(f"Begin writing output file: {datetime.now()}") + self.logger.info(f"Begin writing output file: {datetime.now()}") # Open file, name it based on the stat_input config setting, # (the input data file) except replace the .data @@ -381,7 +381,7 @@ def write_output_file(self) -> None: file.writelines('\n') file.writelines('\n') file.close() - self.eclv_logger.info(f"Finished writing output file: {datetime.now()}") + self.logger.info(f"Finished writing output file: {datetime.now()}") def main(config_filename=None): @@ -394,29 +394,7 @@ def main(config_filename=None): @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_eclv_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Eclv(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.eclv_logger.info(f"Finished ECLV plot: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, Eclv) if __name__ == "__main__": diff --git a/metplotpy/plots/ens_ss/ens_ss.py b/metplotpy/plots/ens_ss/ens_ss.py index 4d4e490e..d6d153ee 100644 --- a/metplotpy/plots/ens_ss/ens_ss.py +++ b/metplotpy/plots/ens_ss/ens_ss.py @@ -18,7 +18,6 @@ import re import itertools -import yaml import numpy as np import pandas as pd @@ -59,14 +58,14 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (EnsSs). self.config_obj = EnsSsConfig(self.parameters) - self.ens_logger = self.config_obj.logger - self.ens_logger.info(f"Start Ens_ss plot: {datetime.now()}") + self.logger = self.config_obj.logger + self.logger.info(f"Start Ens_ss plot: {datetime.now()}") # Check that we have all the necessary settings for each series - self.ens_logger.info(f"Consistency checking of config settings for colors, " + self.logger.info(f"Consistency checking of config settings for colors, " f"legends, etc.{datetime.now()}") is_config_consistent = self.config_obj._config_consistency_check() - self.ens_logger.info(f"Finished consistency checking of config settings for colors, " + self.logger.info(f"Finished consistency checking of config settings for colors, " f"legends, etc.{datetime.now()}") if not is_config_consistent: value_error_msg = ("ValueError: The number of series defined by " @@ -76,18 +75,18 @@ def __init__(self, parameters: dict) -> None: "check the number of your configuration file's " "plot_i, plot_disp, series_order, user_legend, show_legend and " "colors settings.") - self.ens_logger.error(value_error_msg) + self.logger.error(value_error_msg) raise ValueError(value_error_msg) # Read in input data, location specified in config file - self.ens_logger.info(f"Begin reading input data: {datetime.now()}") + self.logger.info(f"Begin reading input data: {datetime.now()}") self.input_df = self._read_input_data() # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.ens_logger.info(f"Performing event equalization: {datetime.now()}") + self.logger.info(f"Performing event equalization: {datetime.now()}") self._perform_event_equalization() - self.ens_logger.info(f"Finished event equalization: {datetime.now()}") + self.logger.info(f"Finished event equalization: {datetime.now()}") # Create a list of series objects. 
# Each series object contains all the necessary information for plotting, @@ -197,7 +196,7 @@ def _create_series(self, input_data): """ - self.ens_logger.info(f"Begin creating series objects: {datetime.now()}") + self.logger.info(f"Begin creating series objects: {datetime.now()}") series_list = [] @@ -211,7 +210,7 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.ens_logger.info(f"Finished creating series objects:" + self.logger.info(f"Finished creating series objects:" f" {datetime.now()}") return series_list @@ -221,7 +220,7 @@ def _create_figure(self): Create a Ensemble spread-skill plot from defaults and custom parameters """ - self.ens_logger.info(f"Begin creating the figure: {datetime.now()}") + self.logger.info(f"Begin creating the figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() @@ -253,7 +252,7 @@ def _create_figure(self): self._yaxis_limits() self._y2axis_limits() - self.ens_logger.info(f"Finished creating the figure: {datetime.now()}") + self.logger.info(f"Finished creating the figure: {datetime.now()}") def _add_y2axis(self) -> None: """ @@ -293,7 +292,7 @@ def _draw_series(self, series: EnsSsSeries) -> None: :param series: EnsSs series object with data and parameters """ - self.ens_logger.info(f"Begin drawing the series on the plot:" + self.logger.info(f"Begin drawing the series on the plot:" f" {datetime.now()}") # add the plot @@ -335,7 +334,7 @@ def _draw_series(self, series: EnsSsSeries) -> None: secondary_y=True ) - self.ens_logger.info(f"Finished drawing the series on the plot:" + self.logger.info(f"Finished drawing the series on the plot:" f" {datetime.now()}") def _create_layout(self) -> Figure: @@ -557,28 +556,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = EnsSs(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.ens_logger.info(f"Finished EnsSs plot: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, EnsSs) if __name__ == "__main__": diff --git a/metplotpy/plots/equivalence_testing_bounds/equivalence_testing_bounds.py b/metplotpy/plots/equivalence_testing_bounds/equivalence_testing_bounds.py index 698ab88c..5d3798c0 100644 --- a/metplotpy/plots/equivalence_testing_bounds/equivalence_testing_bounds.py +++ b/metplotpy/plots/equivalence_testing_bounds/equivalence_testing_bounds.py @@ -39,6 +39,7 @@ class EquivalenceTestingBounds(BasePlot): """ Generates a Plotly Equivalence Testing Bounds plot . """ + LONG_NAME = 'Equivalence Testing Bounds' def __init__(self, parameters: dict) -> None: """ Creates a Plotly Equivalence Testing Bounds plot, based on @@ -57,8 +58,8 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (EquivalenceTestingBounds). 
self.config_obj = LineConfig(self.parameters) - self.eq_logger = self.config_obj.logger - self.eq_logger.info(f"Start equivalence testing bounds: {datetime.now()}") + self.logger = self.config_obj.logger + self.logger.info(f"Start equivalence testing bounds: {datetime.now()}") # Check that we have all the necessary settings for each series is_config_consistent = self.config_obj._config_consistency_check() @@ -69,12 +70,12 @@ def __init__(self, parameters: dict) -> None: " the number of your configuration file's plot_i," " plot_disp, series_order, user_legend," " colors, show_legend and series_symbols settings.") - self.eq_logger.error(f"ValueError: {error_msg}: {datetime.now()}") + self.logger.error(f"ValueError: {error_msg}: {datetime.now()}") raise ValueError(error_msg) # Read in input data, location specified in config file self.input_df = self._read_input_data() - self.eq_logger.info(f"Finished reading input data: {datetime.now()}") + self.logger.info(f"Finished reading input data: {datetime.now()}") # Apply event equalization, if requested if self.config_obj.use_ee is True: @@ -113,7 +114,7 @@ def _read_input_data(self): Returns: """ - self.eq_logger.info(f"Begin reading input data: {datetime.now()}") + self.logger.info(f"Begin reading input data: {datetime.now()}") return pd.read_csv(self.config_obj.parameters['stat_input'], sep='\t', header='infer', float_precision='round_trip') @@ -134,7 +135,7 @@ def _create_series(self, input_data): """ - self.eq_logger.info(f"Creating series object: {datetime.now()}") + self.logger.info(f"Creating series object: {datetime.now()}") series_list = [] # add series for y1 axis @@ -184,7 +185,7 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.eq_logger.info(f"Finished creating series object:" + self.logger.info(f"Finished creating series object:" f" {datetime.now()}") return series_list @@ -192,7 +193,7 @@ def _create_figure(self): """ Create an Equivalence Testing Bounds plot from defaults and custom parameters """ - self.eq_logger.info(f"Creating the figure: {datetime.now()}") + self.logger.info(f"Creating the figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() @@ -211,7 +212,7 @@ def _create_figure(self): self._draw_series(series, ind) ind = ind + 1 - self.eq_logger.info(f"Finished creating the figure: {datetime.now()}") + self.logger.info(f"Finished creating the figure: {datetime.now()}") def _draw_series(self, series: LineSeries, ind: int) -> None: """ @@ -221,7 +222,7 @@ def _draw_series(self, series: LineSeries, ind: int) -> None: :param x_points_index_adj: values for adjusting x-values position """ - self.eq_logger.info(f"Start drawing the lines on the plot: {datetime.now()}") + self.logger.info(f"Start drawing the lines on the plot: {datetime.now()}") ci_tost_up = series.series_points['ci_tost'][1] ci_tost_lo = series.series_points['ci_tost'][0] dif = series.series_points['dif'] @@ -279,7 +280,7 @@ def _draw_series(self, series: LineSeries, ind: int) -> None: } ) - self.eq_logger.info(f"Finished drawing the lines on the plot: {datetime.now()}") + self.logger.info(f"Finished drawing the lines on the plot: {datetime.now()}") def _create_layout(self) -> Figure: """ @@ -500,7 +501,7 @@ def write_html(self) -> None: Plotly.js """ - self.eq_logger.info(f"Write html file: {datetime.now()}") + self.logger.info(f"Write html file: {datetime.now()}") if self.config_obj.create_html is True: # construct the fle name from 
plot_filename @@ -510,14 +511,14 @@ def write_html(self) -> None: # save html self.figure.write_html(html_name, include_plotlyjs=False) - self.eq_logger.info(f"Finished writing html file: {datetime.now()}") + self.logger.info(f"Finished writing html file: {datetime.now()}") def write_output_file(self) -> None: """ Formats y1 and y2 series point data to the 2-dim arrays and saves them to the files """ - self.eq_logger.info(f"Write output file: {datetime.now()}") + self.logger.info(f"Write output file: {datetime.now()}") # if points_path parameter doesn't exist, # open file, name it based on the stat_input config setting, @@ -559,7 +560,7 @@ def write_output_file(self) -> None: # save points self._save_points(ci_tost_df.values.tolist(), filename) - self.eq_logger.info(f"Finished writing the output file: {datetime.now()}") + self.logger.info(f"Finished writing the output file: {datetime.now()}") @staticmethod def _save_points(points: list, output_file: str) -> None: @@ -600,29 +601,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./custom_line_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = EquivalenceTestingBounds(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.eq_logger.info(f"Finished equivalence testing bounds: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, EquivalenceTestingBounds) if __name__ == "__main__": diff --git a/metplotpy/plots/histogram/hist.py b/metplotpy/plots/histogram/hist.py index 3839e8a4..a3012a5c 100644 --- a/metplotpy/plots/histogram/hist.py +++ b/metplotpy/plots/histogram/hist.py @@ -64,15 +64,15 @@ def __init__(self, parameters: dict) -> None: getattr(sys.modules['metplotpy.plots.histogram.hist_config'], self.config_obj_name)(self.parameters) - self.hist_logger = self.config_obj.logger - self.hist_logger.info(f"Begin [rank|probability|relative frequency] histogram:" + self.logger = self.config_obj.logger + self.logger.info(f"Begin [rank|probability|relative frequency] histogram:" f" {datetime.now()}") # Check that we have all the necessary settings for each ser - self.hist_logger.info(f"Performing consistency check for settings in config " + self.logger.info(f"Performing consistency check for settings in config " f"file: {datetime.now()}") is_config_consistent = self.config_obj._config_consistency_check() - self.hist_logger.info(f"Finished with consistency check: {datetime.now()}") + self.logger.info(f"Finished with consistency check: {datetime.now()}") if not is_config_consistent: error_msg = ("The number of ser defined by series_val_1 is" " inconsistent with the number of settings" @@ -80,18 +80,18 @@ def __init__(self, parameters: dict) -> None: " the number of your configuration file's " " plot_disp, series_order, user_legend, show_legend" " colors settings.") - self.hist_logger.error(f"ValueError: {error_msg}") + self.logger.error(f"ValueError: {error_msg}") raise ValueError(error_msg) # Read in input data, location specified in config file self.input_df = 
self._read_input_data() - self.hist_logger.info(f"Finished reading input data: {datetime.now()}") + self.logger.info(f"Finished reading input data: {datetime.now()}") # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.hist_logger.info(f"Begin event equalization: {datetime.now()}") + self.logger.info(f"Begin event equalization: {datetime.now()}") self._perform_event_equalization() - self.hist_logger.info(f"Event equalization complete: {datetime.now()}") + self.logger.info(f"Event equalization complete: {datetime.now()}") # Create a list of ser objects. # Each ser object contains all the necessary information for plotting, @@ -180,7 +180,7 @@ def _perform_event_equalization(self): self.input_df = input_df_ee.drop('equalize', axis=1) - def _read_input_data(self): + def _read_input_data(self): """ Read the input data file and store as a pandas dataframe so we can subset the @@ -193,7 +193,7 @@ def _read_input_data(self): """ - self.hist_logger.info(f"Reading input data: {datetime.now()}") + self.logger.info(f"Reading input data: {datetime.now()}") return pd.read_csv(self.config_obj.parameters['stat_input'], sep='\t', header='infer', float_precision='round_trip') @@ -218,7 +218,7 @@ def _create_series(self, input_data): """ - self.hist_logger.info(f"Creating the series objects: {datetime.now()}") + self.logger.info(f"Creating the series objects: {datetime.now()}") series_list = [] hist_series_type = \ getattr(sys.modules['metplotpy.plots.histogram.hist_series'], @@ -247,14 +247,14 @@ def _create_series(self, input_data): # reorder ser series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.hist_logger.info(f"Finished creating the series objects: {datetime.now()}") + self.logger.info(f"Finished creating the series objects: {datetime.now()}") return series_list def _create_figure(self): """ Create a box plot from defaults and custom parameters """ - self.hist_logger.info(f"Begin creating the histogram figure: {datetime.now()}") + self.logger.info(f"Begin creating the histogram figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() self._add_xaxis() @@ -271,7 +271,7 @@ def _create_figure(self): self.config_obj ) - self.hist_logger.info(f"Finished creating the histogram figure: " + self.logger.info(f"Finished creating the histogram figure: " f"{datetime.now()}") def _draw_series(self, series: HistSeries) -> None: @@ -299,7 +299,7 @@ def _create_layout(self) -> Figure: :return: Figure object """ - self.hist_logger.info(f"Creating the layout: {datetime.now()}") + self.logger.info(f"Creating the layout: {datetime.now()}") # create annotation annotation = [ @@ -344,14 +344,14 @@ def _create_layout(self) -> Figure: plot_bgcolor=PLOTLY_PAPER_BGCOOR ) - self.hist_logger.info(f"Finished creating the layout: {datetime.now()}") + self.logger.info(f"Finished creating the layout: {datetime.now()}") return fig def _add_xaxis(self) -> None: """ Configures and adds x-axis to the plot """ - self.hist_logger.info(f"Configuring and adding the x-axis: {datetime.now()}") + self.logger.info(f"Configuring and adding the x-axis: {datetime.now()}") self.figure.update_xaxes(title_text=self.config_obj.xaxis, linecolor=PLOTLY_AXIS_LINE_COLOR, @@ -370,7 +370,7 @@ def _add_xaxis(self) -> None: tickfont={'size': self.config_obj.x_tickfont_size}, dtick=self._get_dtick() ) - self.hist_logger.info(f"Finished configuring and adding the x-axis:" + self.logger.info(f"Finished configuring and adding the x-axis:" f" {datetime.now()}") def 
_add_yaxis(self) -> None: @@ -378,7 +378,7 @@ def _add_yaxis(self) -> None: Configures and adds y-axis to the plot """ - self.hist_logger.info(f"Configuring and adding the y-axis: {datetime.now()}") + self.logger.info(f"Configuring and adding the y-axis: {datetime.now()}") self.figure.update_yaxes(title_text= util.apply_weight_style(self.config_obj.yaxis_1, self.config_obj.parameters[ @@ -400,7 +400,7 @@ def _add_yaxis(self) -> None: tickangle=self.config_obj.y_tickangle, tickfont={'size': self.config_obj.y_tickfont_size} ) - self.hist_logger.info(f"Finished configuring and adding the y-axis:" + self.logger.info(f"Finished configuring and adding the y-axis:" f" {datetime.now()}") def _add_legend(self) -> None: @@ -409,7 +409,7 @@ def _add_legend(self) -> None: and attaches it to the initial Figure """ - self.hist_logger.info(f"Adding the legend: {datetime.now()}") + self.logger.info(f"Adding the legend: {datetime.now()}") self.figure.update_layout(legend={'x': self.config_obj.bbox_x, 'y': self.config_obj.bbox_y, 'xanchor': 'center', @@ -425,7 +425,7 @@ def _add_legend(self) -> None: 'color': "black" } }) - self.hist_logger.info(f"Finished adding the legend: {datetime.now()}") + self.logger.info(f"Finished adding the legend: {datetime.now()}") def write_html(self) -> None: """ @@ -433,7 +433,7 @@ def write_html(self) -> None: Plotly.js """ - self.hist_logger.info(f"Begin writing html: {datetime.now()}") + self.logger.info(f"Begin writing html: {datetime.now()}") if self.config_obj.create_html is True: # construct the file name from plot_filename @@ -444,13 +444,13 @@ def write_html(self) -> None: # save html self.figure.write_html(html_name, include_plotlyjs=False) - self.hist_logger.info(f"Finished writing html: {datetime.now()}") + self.logger.info(f"Finished writing html: {datetime.now()}") def write_output_file(self) -> None: """ saves box points to the file """ - self.hist_logger.info(f"Begin writing the output file: {datetime.now()}") + self.logger.info(f"Begin writing the output file: {datetime.now()}") # if points_path parameter doesn't exist, # open file, name it based on the stat_input config setting, @@ -481,4 +481,4 @@ def write_output_file(self) -> None: file.writelines('\n') file.close() - self.hist_logger.info(f"Finished writing the output file: {datetime.now()}") + self.logger.info(f"Finished writing the output file: {datetime.now()}") diff --git a/metplotpy/plots/histogram/histogram.py b/metplotpy/plots/histogram/histogram.py index f51bb0eb..231d333f 100644 --- a/metplotpy/plots/histogram/histogram.py +++ b/metplotpy/plots/histogram/histogram.py @@ -46,7 +46,7 @@ def __init__(self, parameters, data): ValueError: If the data array has dimension not equal 2. 
""" - default_conf_filename = "histogram_defaults.yml" + default_conf_filename = "histogram_defaults.yaml" # init common layout super().__init__(parameters, default_conf_filename) diff --git a/metplotpy/plots/histogram/prob_hist.py b/metplotpy/plots/histogram/prob_hist.py index 1f92a914..d4cd7aae 100644 --- a/metplotpy/plots/histogram/prob_hist.py +++ b/metplotpy/plots/histogram/prob_hist.py @@ -15,7 +15,6 @@ from typing import Union from datetime import datetime -import yaml from metplotpy.plots.histogram.hist import Hist from metplotpy.plots.histogram.hist_series import HistSeries @@ -26,12 +25,10 @@ class ProbHist(Hist): """ Generates a Plotly Probability Histogram or Histograms of probability integral transform plot for 1 or more traces """ - + LONG_NAME = 'probability histogram' config_obj_name = 'ProbHistogramConfig' series_obj = 'ProbHistSeries' - - def _get_x_points(self, series: HistSeries) -> list: x_points = [] for ser in self.series_list: @@ -66,30 +63,17 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: - - plot = ProbHist(docs) + plot = ProbHist(params) plot.save_to_file() # plot.show_in_browser() plot.write_html() plot.write_output_file() log_level = plot.get_config_value('log_level') log_filename = plot.get_config_value('log_filename') - prob_logger = util.get_common_logger(log_level, log_filename) - prob_logger.info(f"Finished probability histogram: {datetime.now()}") + logger = util.get_common_logger(log_level, log_filename) + logger.info(f"Finished probability histogram: {datetime.now()}") except ValueError as val_er: print(val_er) diff --git a/metplotpy/plots/histogram/rank_hist.py b/metplotpy/plots/histogram/rank_hist.py index c0f0ce99..a9a054b9 100644 --- a/metplotpy/plots/histogram/rank_hist.py +++ b/metplotpy/plots/histogram/rank_hist.py @@ -23,13 +23,12 @@ class RankHist(Hist): """ Generates a Plotly Histograms of ensemble rank plot for 1 or more traces """ - + LONG_NAME = 'rank histogram' config_obj_name='RankHistogramConfig' series_obj='RankHistSeries' def _get_x_points(self, series: HistSeries) -> list: - self.hist_logger.info(f"Retrieving x points for rank histogram:" - f" {datetime.now()}") + self.logger.info(f"Retrieving x points for {self.LONG_NAME}: {datetime.now()}") return sorted(series.series_data['i_value'].unique()) @@ -42,28 +41,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = RankHist(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.hist_logger.info(f"Finished creating rank histogram: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, RankHist) if __name__ == "__main__": diff --git a/metplotpy/plots/histogram/rel_hist.py b/metplotpy/plots/histogram/rel_hist.py index bebb7e84..3040451e 100644 --- a/metplotpy/plots/histogram/rel_hist.py +++ b/metplotpy/plots/histogram/rel_hist.py @@ -25,13 +25,12 @@ class RelHist(Hist): """ Generates a Plotly Relative Histogram or Histograms of relative position plot for 1 or more traces """ - + LONG_NAME = 'relative frequency histogram' config_obj_name='RelHistogramConfig' series_obj='RelHistSeries' def _get_x_points(self, series: HistSeries) -> list: - self.hist_logger.info(f"Retrieving x points for relative frequency histogram: " - f"{datetime.now()}") + self.logger.info(f"Retrieving x points for {self.LONG_NAME}: {datetime.now()}") return sorted(series.series_data['i_value'].unique()) @@ -45,29 +44,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = RelHist(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.hist_logger.info(f"Finished creating the relative frequency histogram: " - f"{datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, RelHist) if __name__ == "__main__": diff --git a/metplotpy/plots/histogram_2d/histogram_2d.py b/metplotpy/plots/histogram_2d/histogram_2d.py index 648bd921..3f22aa06 100644 --- a/metplotpy/plots/histogram_2d/histogram_2d.py +++ b/metplotpy/plots/histogram_2d/histogram_2d.py @@ -169,9 +169,6 @@ def write_output_file(self): self.logger.info(f"Finished writing plot to output file: {datetime.now()}") - - - def _read_input_data(self): """ Read the input data file and store as an xarray @@ -187,32 +184,16 @@ def _read_input_data(self): try: ds = xr.open_dataset(self.input_file) except IOError: - print("Unable to open input file") + print(f"Unable to open input file: {self.input_file}") sys.exit(1) self.logger.info(f"Finished reading input data: {datetime.now()}") return ds def main(config_filename=None): - metplotpy_base = os.getenv('METPLOTPY_BASE') - if not metplotpy_base: - metplotpy_base = '' - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
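# Assumed intent of the LONG_NAME class attribute added to the histogram classes above:
# shared Hist code can include the plot's human-readable name in log messages without
# each subclass rewriting the message. Minimal sketch:
class HistSketch:
    LONG_NAME = "histogram"

    def x_points_message(self):
        return f"Retrieving x points for {self.LONG_NAME}"

class RankHistSketch(HistSketch):
    LONG_NAME = "rank histogram"

print(RankHistSketch().x_points_message())   # Retrieving x points for rank histogram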
- if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: - h = Histogram_2d(docs) + h = Histogram_2d(params) h.save_to_file() h.logger.info(f"Finished generating histogram 2D plot: {datetime.now()}") except ValueError as ve: diff --git a/metplotpy/plots/hovmoeller/hovmoeller.py b/metplotpy/plots/hovmoeller/hovmoeller.py index 59d5801d..b189a903 100644 --- a/metplotpy/plots/hovmoeller/hovmoeller.py +++ b/metplotpy/plots/hovmoeller/hovmoeller.py @@ -231,23 +231,7 @@ def main(config_filename=None): """ - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r'): - try: - # Use the METcalcpy parser to parse config files with environment - # variables that - # look like: - # input_file: !ENV '${ENV_NAME}/some_input_file.nc' - # This supports METplus hovmoeller use case. - config = metcalcpy.util.read_env_vars_in_config.parse_config(config_file) - except yaml.YAMLError as exc: - print(exc) - + config = util.get_params(config_filename) try: plot = Hovmoeller(config) plot.save_to_file() diff --git a/metplotpy/plots/line/line.py b/metplotpy/plots/line/line.py index 40cc05a4..21151dc4 100644 --- a/metplotpy/plots/line/line.py +++ b/metplotpy/plots/line/line.py @@ -65,9 +65,9 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (Line). self.config_obj = LineConfig(self.parameters) - self.line_logger = self.config_obj.logger + self.logger = self.config_obj.logger - self.line_logger.info(f"Begin creating the line plot: {datetime.now()}") + self.logger.info(f"Begin creating the line plot: {datetime.now()}") # Check that we have all the necessary settings for each series is_config_consistent = self.config_obj._config_consistency_check() @@ -78,7 +78,7 @@ def __init__(self, parameters: dict) -> None: "the number of your configuration file's plot_ci, " "plot_disp, series_order, user_legend " "colors, series_symbols, show_legend settings.") - self.line_logger.error(f"ValueError: {error_msg}: {datetime.now()}") + self.logger.error(f"ValueError: {error_msg}: {datetime.now()}") raise ValueError(error_msg) # Read in input data, location specified in config file @@ -86,10 +86,10 @@ def __init__(self, parameters: dict) -> None: # Apply event equalization, if requested if self.config_obj.use_ee is True: - self.line_logger.info(f"Begin event equalization: {datetime.now()}") + self.logger.info(f"Begin event equalization: {datetime.now()}") self.input_df = calc_util.perform_event_equalization(self.parameters, self.input_df) - self.line_logger.info(f"Finished event equalization: {datetime.now()}") + self.logger.info(f"Finished event equalization: {datetime.now()}") # Create a list of series objects. 
# Each series object contains all the necessary information for plotting, @@ -147,7 +147,7 @@ def _create_series(self, input_data): """ - self.line_logger.info(f"Begin creating the series objects: {datetime.now()}") + self.logger.info(f"Begin creating the series objects: {datetime.now()}") series_list = [] # add series for y1 axis @@ -192,14 +192,14 @@ def _create_series(self, input_data): # reorder series series_list = self.config_obj.create_list_by_series_ordering(series_list) - self.line_logger.info(f"Finished creating the series objects: {datetime.now()}") + self.logger.info(f"Finished creating the series objects: {datetime.now()}") return series_list def _create_figure(self) -> None: """ Create a line plot from defaults and custom parameters """ - self.line_logger.info(f"Begin create the figure: {datetime.now()}") + self.logger.info(f"Begin create the figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() @@ -270,7 +270,7 @@ def _create_figure(self) -> None: if self.config_obj.start_from_zero is True: self.figure.update_xaxes(range=[0, len(x_points_index) - 1]) - self.line_logger.info(f"Finished creating the figure: {datetime.now()}") + self.logger.info(f"Finished creating the figure: {datetime.now()}") def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = None) \ @@ -281,7 +281,7 @@ def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = :param series: Line series object with data and parameters :param x_points_index_adj: values for adjusting x-values position """ - self.line_logger.info(f"Begin drawing the lines on the plot: {datetime.now()}") + self.logger.info(f"Begin drawing the lines on the plot: {datetime.now()}") y_points = series.series_points['dbl_med'] # show or not ci @@ -354,7 +354,7 @@ def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = secondary_y=series.y_axis != 1 ) - self.line_logger.info(f"Finished drawing the lines on the plot:" + self.logger.info(f"Finished drawing the lines on the plot:" f" {datetime.now()}") def _create_layout(self) -> Figure: @@ -440,7 +440,7 @@ def _adjust_for_vertical(self, x_points_index: list) -> None: :param x_points_index: list of indexws for the original x -axis """ - self.line_logger.info(f"Begin switching x and y axis: {datetime.now()}") + self.logger.info(f"Begin switching x and y axis: {datetime.now()}") odered_indy_label = self.config_obj.create_list_by_plot_val_ordering( self.config_obj.indy_label) if self.config_obj.vert_plot is True: @@ -460,7 +460,7 @@ def _adjust_for_vertical(self, x_points_index: list) -> None: } ) - self.line_logger.info(f"Finished switching x and y axis: {datetime.now()}") + self.logger.info(f"Finished switching x and y axis: {datetime.now()}") def _add_xaxis(self) -> None: """ @@ -703,7 +703,7 @@ def write_output_file(self) -> None: files """ - self.line_logger.info(f"Begin writing to output file: {datetime.now()}") + self.logger.info(f"Begin writing to output file: {datetime.now()}") # if points_path parameter doesn't exist, # open file, name it based on the stat_input config setting, # (the input data file) except replace the .data @@ -753,7 +753,7 @@ def write_output_file(self) -> None: self._save_points(all_points_1, filename + ".points1") self._save_points(all_points_2, filename + ".points2") - self.line_logger.info(f"Finished writing to output file: {datetime.now()}") + self.logger.info(f"Finished writing to output file: {datetime.now()}") @staticmethod def _find_min_max(series: LineSeries, 
yaxis_min: Union[float, None], @@ -856,29 +856,7 @@ def main(config_filename=None): @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./custom_line_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Line(docs) - plot.save_to_file() - #plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.line_logger.info(f"Finished creating line plot: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, Line) if __name__ == "__main__": diff --git a/metplotpy/plots/mpr_plot/mpr_plot.py b/metplotpy/plots/mpr_plot/mpr_plot.py index 7f6d65a0..329d5b27 100644 --- a/metplotpy/plots/mpr_plot/mpr_plot.py +++ b/metplotpy/plots/mpr_plot/mpr_plot.py @@ -587,22 +587,9 @@ def main(config_filename=None): The location of the input data is defined in either the default or custom config file. """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./custom_line_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: - plot = MprPlot(docs) + plot = MprPlot(params) plot.save_to_file() if plot.config_obj.show_in_browser: plot.show_in_browser() diff --git a/metplotpy/plots/performance_diagram/performance_diagram.py b/metplotpy/plots/performance_diagram/performance_diagram.py index 4507f6ef..e5f92ed2 100644 --- a/metplotpy/plots/performance_diagram/performance_diagram.py +++ b/metplotpy/plots/performance_diagram/performance_diagram.py @@ -21,7 +21,6 @@ from matplotlib.colors import LinearSegmentedColormap from matplotlib.font_manager import FontProperties import numpy as np -import yaml import pandas as pd from metplotpy.plots.base_plot import BasePlot import metcalcpy.util.utils as calc_util @@ -471,23 +470,10 @@ def main(config_filename=None): Returns: """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_performance_diagram.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: # create a performance diagram - PerformanceDiagram(docs) + PerformanceDiagram(params) except ValueError as value_error: print(value_error) diff --git a/metplotpy/plots/reliability_diagram/reliability.py b/metplotpy/plots/reliability_diagram/reliability.py index 22764c86..b83f9572 100644 --- a/metplotpy/plots/reliability_diagram/reliability.py +++ b/metplotpy/plots/reliability_diagram/reliability.py @@ -19,7 +19,6 @@ from datetime import datetime from typing import Union -import yaml import numpy as np import pandas as pd @@ -731,29 +730,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./custom_line_plot.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = Reliability(docs) - plot.save_to_file() - #plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.logger.info(f"Finished generating reliability diagram: {datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, Reliability) if __name__ == "__main__": diff --git a/metplotpy/plots/revision_box/revision_box.py b/metplotpy/plots/revision_box/revision_box.py index 3b7b5518..d9336712 100644 --- a/metplotpy/plots/revision_box/revision_box.py +++ b/metplotpy/plots/revision_box/revision_box.py @@ -13,7 +13,6 @@ import os import re from datetime import datetime -import yaml import plotly.graph_objects as go from metplotpy.plots.base_plot import BasePlot @@ -30,7 +29,7 @@ class RevisionBox(Box): """ Generates a Plotly Revision box plot for 1 or more boxes. """ - + LONG_NAME = 'revision box' defaults_name = 'revision_box_defaults.yaml' def __init__(self, parameters: dict) -> None: @@ -303,29 +302,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_revision_box.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = RevisionBox(docs) - plot.save_to_file() - # plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.logger.info(f"Finished revision box plot: {datetime.now()}") - except ValueError as ve: - print(ve) + util.make_plot(config_filename, RevisionBox) if __name__ == "__main__": diff --git a/metplotpy/plots/revision_series/revision_series.py b/metplotpy/plots/revision_series/revision_series.py index 8ef2fd02..acf7621e 100644 --- a/metplotpy/plots/revision_series/revision_series.py +++ b/metplotpy/plots/revision_series/revision_series.py @@ -38,7 +38,7 @@ class RevisionSeries(Line): """ Generates a Plotly Revision Series plot for 1 or more traces where each dot is represented by a text point data file. """ - + LONG_NAME = 'revision series' defaults_name = 'revision_series_defaults.yaml' def __init__(self, parameters: dict) -> None: @@ -59,7 +59,8 @@ def __init__(self, parameters: dict) -> None: # config file that represents the BasePlot object (RevisionSeries). self.config_obj = RevisionSeriesConfig(self.parameters) - self.config_obj.logger.info(f'Begin revision series plotting.') + self.logger = self.config_obj.logger + self.logger.info('Begin revision series plotting.') # Check that we have all the necessary settings for each series is_config_consistent = self.config_obj._config_consistency_check() @@ -70,7 +71,7 @@ def __init__(self, parameters: dict) -> None: " the number of your configuration file's plot_i," " plot_disp, series_order, user_legend," " colors, show_legend and series_symbols settings.") - self.config_obj.logger.error(f"ValueError: {value_error_msg}") + self.logger.error(f"ValueError: {value_error_msg}") raise ValueError(value_error_msg) # Read in input data, location specified in config file @@ -133,8 +134,7 @@ def _create_figure(self): Create a Revision Series plot from defaults and custom parameters """ - self.config_obj.logger.info(f"Begin creating the revision series figure:" - f" {datetime.now()}") + self.logger.info(f"Begin creating the {self.LONG_NAME} figure: {datetime.now()}") # create and draw the plot self.figure = self._create_layout() self._add_xaxis() @@ -179,8 +179,7 @@ def _create_figure(self): # apply y axis limits self._yaxis_limits() - self.config_obj.logger.info(f"Finish creating revision series figure: " - f"{datetime.now()}") + self.logger.info(f"Finish creating {self.LONG_NAME} figure: {datetime.now()}") def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = None) -> None: """ @@ -190,7 +189,7 @@ def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = N :param x_points_index_adj: values for adjusting x-values position """ - self.config_obj.logger.info(f"Draw the formatted series: {datetime.now()}") + self.logger.info(f"Draw the formatted series: {datetime.now()}") y_points = series.series_points['points']['stat_value'].tolist() # add the plot @@ -208,7 +207,7 @@ def _draw_series(self, series: Series, x_points_index_adj: Union[list, None] = N ), secondary_y=False ) - self.config_obj.logger.info(f"Finished drawing series: {datetime.now()}") + self.logger.info(f"Finished drawing series: {datetime.now()}") def _add_xaxis(self) -> None: """ @@ -239,7 +238,7 
@@ def _calc_stag_adjustments(self) -> list: :return: the list of the adjustment values """ - self.config_obj.logger.info("Calculating the x-axis adjustment.") + self.logger.info("Calculating the x-axis adjustment.") # get the total number of series num_stag = len(self.config_obj.all_series_y1) @@ -260,7 +259,7 @@ def write_output_file(self) -> None: """ Formats y1 series point data and saves them to the files """ - self.config_obj.logger.info("Write output file") + self.logger.info("Write output file") # if points_path parameter doesn't exist, # open file, name it based on the stat_input config setting, # (the input data file) except replace the .data @@ -305,30 +304,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./revision_series_defaults.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - try: - plot = RevisionSeries(docs) - plot.save_to_file() - #plot.show_in_browser() - plot.write_html() - plot.write_output_file() - plot.config_obj.logger.info(f"Finished creating revision series plot: " - f"{datetime.now()}") - except ValueError as val_er: - print(val_er) + util.make_plot(config_filename, RevisionSeries) if __name__ == "__main__": diff --git a/metplotpy/plots/roc_diagram/roc_diagram.py b/metplotpy/plots/roc_diagram/roc_diagram.py index 615a9157..c728e0c3 100644 --- a/metplotpy/plots/roc_diagram/roc_diagram.py +++ b/metplotpy/plots/roc_diagram/roc_diagram.py @@ -15,7 +15,6 @@ import os from datetime import datetime -import yaml import re import warnings # with warnings.catch_warnings(): @@ -108,7 +107,7 @@ def __init__(self, parameters): if len(self.series_list) > 0: self._add_lines(self.config_obj) - def _read_input_data(self): + def _read_input_data(self) -> pd.DataFrame: """ Read the input data file (either CTC or PCT linetype) and store as a pandas dataframe so we can subset the @@ -117,11 +116,60 @@ def _read_input_data(self): Args: - Returns: + Returns: input_df the dataframe representation of the input data """ self.logger.info("Reading input data.") - return pd.read_csv(self.config_obj.stat_input, sep='\t', header='infer') + # If self.config_obj.lineype_ctc is True, check for the presence of the fy_oy column. + # If present, proceed as usual, otherwise extract the fcst_thresh, fy_oy, fy_on, fn_on, and fn_oy data + # from the stat_name and stat_value columns (long to wide). + input_df = pd.read_csv(self.config_obj.stat_input, sep='\t', header='infer') + if self.config_obj.linetype_ctc: + # Check if there is a column name 'fy_oy'. If it is missing, then this data has been reformatted by + # the METdataio reformatter. + input_columns = input_df.columns.to_list() + if 'fy_oy' in input_columns: + # This data has been created from the METviewer database + return input_df + + else: + # This data was created by the METdataio reformatter and needs to be modified from long to wide format. 
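For reference, the long-to-wide reshaping that ctc_long_to_wide() performs below boils down to a pandas pivot keyed on every column except the stat columns. A minimal standalone sketch with toy rows (column names and counts are illustrative, not taken from real METdataio output):

import pandas as pd

# Toy long-format CTC data: one row per contingency-table count.
long_df = pd.DataFrame({
    'model': ['GFS'] * 4,
    'fcst_thresh': ['>=0.5'] * 4,
    'stat_name': ['fy_oy', 'fy_on', 'fn_oy', 'fn_on'],
    'stat_value': [30, 5, 7, 58],
})

# Pivot so each stat_name becomes its own column, keyed by the remaining columns,
# then flatten the index and lower-case the headers, as the method does.
index_cols = [col for col in long_df.columns if col not in ('stat_name', 'stat_value')]
wide_df = long_df.pivot(index=index_cols, columns='stat_name', values='stat_value').reset_index()
wide_df.columns = [col.lower() for col in wide_df.columns]
# wide_df columns: model, fcst_thresh, fn_on, fn_oy, fy_on, fy_oy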
+ wide_input_df = self.ctc_long_to_wide(input_df) + return wide_input_df + else: + # PCT data + return input_df + + def ctc_long_to_wide(self, input_df: pd.DataFrame) -> pd.DataFrame: + """ + Convert the dataframe representation of the CTC linetype data (that was reformatted by METdataio) from long + to wide format. The fcst_thresh, fy_oy, fy_on, fn_oy, and fn_on will be in separate columns, + rather than residing under the stat_name and stat_value. + + Args: + @param input_df: The input dataframe that represents the CTC data reformatted by METdataio. + + Returns: ctc_df: a dataframe that has the additional columns: fy_oy, fy_on, fn_on, fn_oy, and + fcst_thresh extracted from the stat_name and stat_values columns + """ + + + # Use all the columns (except the stat_name, stat_value,stat_bcl, stat_bcu, stat_ncl, stat_ncu, + # and Idx column) as the pivot index + col_index = input_df.columns.to_list() + ignore_cols = ['Idx', 'stat_name', 'stat_value', 'stat_bcl', 'stat_bcu', 'stat_ncl', 'stat_ncu'] + for cur in ignore_cols: + if cur in col_index: + col_index.remove(cur) + df_wide = input_df.pivot(index=col_index, columns='stat_name', values='stat_value') + + # reset the index + reset_df_wide = df_wide.reset_index() + + # Convert all the header names (column labels) to all lower case + reset_df_wide.columns = [x.lower() for x in reset_df_wide.columns] + + return reset_df_wide def _create_series(self, input_data): """ @@ -515,22 +563,9 @@ def main(config_filename=None): @param config_filename: default is None, the name of the custom config file to apply Returns: """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./custom_performance_diagram.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: - r = ROCDiagram(docs) + r = ROCDiagram(params) r.save_to_file() r.write_html() diff --git a/metplotpy/plots/roc_diagram/roc_diagram_series.py b/metplotpy/plots/roc_diagram/roc_diagram_series.py index 7f900120..9d3f59c2 100644 --- a/metplotpy/plots/roc_diagram/roc_diagram_series.py +++ b/metplotpy/plots/roc_diagram/roc_diagram_series.py @@ -15,7 +15,6 @@ import warnings import pandas as pd -import re import metcalcpy.util.utils as utils from ..series import Series from ..util import prepare_pct_roc, prepare_ctc_roc @@ -78,7 +77,6 @@ def _create_series_points(self): df_sum = None if self.config.linetype_ctc: - subset_df = self._add_ctc_columns(subset_df) pody, pofd, thresh = prepare_ctc_roc(subset_df, self.config.ctc_ascending) if self.config.summary_curve != 'none': @@ -151,69 +149,4 @@ def _subset_data(self, df_full, permutation): return df_subset - def _add_ctc_columns(self, df_input): - ''' - Create two new columns in the data frame from the fcst_thresh - column of the CTC linetype data. This will be useful in sorting - based on the fcst_thresh values. - - Args: - @param df_input: the dataframe containing all the CTC data - - Returns: - @param thresh_sorted: a new dataframe that is sorted based on - the threshold value and threshold operator - that comprise the fcst_thresh column. - If two or more threshold values are identical, - use the threshold operator (<,<=,==, >=,>) - to determine the order. 
- ''' - # If the df_input dataframe is empty (most likely as a result of event equalization), - # return the df_input data frame. - if df_input.empty: - return df_input - - # From the fcst_thresh column, create two new columns, thresh_values and - # op_wts that we can then sort using Pandas' multi-column sorting - # capability. - operators = [] - values = [] - thresholds = df_input['fcst_thresh'] - # Assign weights to the operators, 1 for the <, 5 for the > so that - # > supercedes all other operators. - wt_maps = {'<': 1, '<=': 2, '==': 3, '>=': 4, '>': 5} - wts = [] - for thrsh in thresholds: - # treat the fcst_thresh as two groups, one for - # the operator and the other for the value (which - # can be a negative value). - match = re.match(r'(\<|\<=|\==|\>=|\>)*((-)*([0-9])(.)*)', thrsh) - match_text = re.match(r'(\<|\<=|\==|\>=|\>)*(.*)', thrsh) - if match: - operators.append(match.group(1)) - value = float(match.group(2)) - values.append(value) - elif match_text: - operators.append(match_text.group(1)) - value = match_text.group(2) - values.append(value) - else: - raise ValueError("fcst_thresh has a value that doesn't conform to " - "the expected format") - - for operator in operators: - # if no operator precedes the number in fcst_thresh, - # then assume this is the same as == and assign a weight of 3 - if operator is None: - wts.append(3) - else: - wts.append(wt_maps[operator]) - - # Add these columns to the input dataframe - df_input['thresh_values'] = values - df_input['op_wts'] = wts - - # return the input dataframe with two additional columns if - # everything worked as expected - return df_input diff --git a/metplotpy/plots/scatter/scatter.py b/metplotpy/plots/scatter/scatter.py index 207e7b52..c61ed4ab 100644 --- a/metplotpy/plots/scatter/scatter.py +++ b/metplotpy/plots/scatter/scatter.py @@ -20,6 +20,9 @@ from plots.base_plot import BasePlot from metplotpy.plots import util +from metplotpy.plots import util + + class Scatter(BasePlot): """ Generates a Plotly scatter plot, @@ -136,20 +139,11 @@ def main(): custom config file. """ - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - with open("./custom_scatter.yaml", 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - - logger = util.get_common_logger(docs['log_level'], docs['log_filename']) - + params = util.get_params("./custom_scatter.yaml") try: - s = Scatter(docs, logger) - s.save_to_file() - # s.show_in_browser() + s = Scatter(params) + s.save_to_file() + s.show_in_browser() except ValueError as ve: print(ve) diff --git a/metplotpy/plots/scatter/scatter_defaults.yml b/metplotpy/plots/scatter/scatter_defaults.yaml similarity index 100% rename from metplotpy/plots/scatter/scatter_defaults.yml rename to metplotpy/plots/scatter/scatter_defaults.yaml diff --git a/metplotpy/plots/skew_t/skew_t.py b/metplotpy/plots/skew_t/skew_t.py index caadfb99..63a0a32e 100644 --- a/metplotpy/plots/skew_t/skew_t.py +++ b/metplotpy/plots/skew_t/skew_t.py @@ -37,7 +37,7 @@ warnings.filterwarnings(action='ignore', category=UserWarning) -def extract_sounding_data(input_file): +def extract_sounding_data(input_file, output_directory): with open(input_file) as infile: data = infile.readlines() @@ -64,12 +64,14 @@ def extract_sounding_data(input_file): # Save the sounding data into a text file, which will then be converted into a # pandas dataframe. 
- with open("sounding_data.dat", "w") as txt_file: + sounding_data_file = os.path.join(output_directory, 'sounding_data.dat') + os.makedirs(output_directory, exist_ok=True) + with open(sounding_data_file, "w") as txt_file: for line in sounding_data: txt_file.write("".join(line) + "\n") # Read in the current sounding data file, replacing any 9999 values with NaN. - df_raw: pandas.DataFrame = pd.read_csv("sounding_data.dat", delim_whitespace=True, + df_raw: pandas.DataFrame = pd.read_csv(sounding_data_file, delim_whitespace=True, skiprows=1, na_values=['9999']) @@ -503,8 +505,6 @@ def create_skew_t(input_file: str, config: dict) -> None: Return: None, generate plots as png files in the specified output file directory. ''' - - file_only = os.path.basename(input_file) logger.info(f" Creating skew T plots for input file {file_only} ") @@ -515,7 +515,7 @@ def create_skew_t(input_file: str, config: dict) -> None: f"GENERATED.") return - sounding_df, plevs = extract_sounding_data(input_file) + sounding_df, plevs = extract_sounding_data(input_file, config['output_directory']) # Check if sounding data consists entirely of na-values. all_na = check_for_all_na(sounding_df) @@ -690,59 +690,50 @@ def main(config_filename=None): Returns: ''' - if not config_filename: - config_file = util.read_config_from_command_line() + config = util.get_params(config_filename) + + # Set up the logging. + log_dir = config['log_directory'] + log_file = config['log_filename'] + log_full_path = os.path.join(log_dir, log_file) + try: + os.makedirs(log_dir, exist_ok=True) + except FileExistsError: + # If directory already exists, this is OK. Continue. + pass + + log_level = config['log_level'] + format_str = "'%(asctime)s||%(levelname)s||%(funcName)s||%(message)s'" + if log_level == 'DEBUG': + logging.basicConfig(filename=log_full_path, level=logging.DEBUG, + format=format_str, + filemode='w') + elif log_level == 'INFO': + logging.basicConfig(filename=log_full_path, level=logging.INFO, + format=format_str, + filemode='w') + elif log_level == 'WARNING': + logging.basicConfig(filename=log_full_path, level=logging.WARNING, + format=format_str, + filemode='w') else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - config = yaml.load(stream, Loader=yaml.FullLoader) - - # Set up the logging. - log_dir = config['log_directory'] - log_file = config['log_filename'] - log_full_path = os.path.join(log_dir, log_file) - try: - os.makedirs(log_dir, exist_ok=True) - except FileExistsError: - # If directory already exists, this is OK. Continue. - pass - - log_level = config['log_level'] - format_str = "'%(asctime)s||%(levelname)s||%(funcName)s||%(message)s'" - if log_level == 'DEBUG': - logging.basicConfig(filename=log_full_path, level=logging.DEBUG, - format=format_str, - filemode='w') - elif log_level == 'INFO': - logging.basicConfig(filename=log_full_path, level=logging.INFO, - format=format_str, - filemode='w') - elif log_level == 'WARNING': - logging.basicConfig(filename=log_full_path, level=logging.WARNING, - format=format_str, - filemode='w') - else: - # log_level == 'ERROR' - logging.basicConfig(filename=log_full_path, level=logging.ERROR, - format=format_str, - filemode='w') - - # Get the list of input files to visualize. 
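For reference, the refactored skew_t main() shown here pulls its settings straight from the parsed config dictionary. A minimal sketch of the keys it relies on; the values are placeholders, only the key names come from this code:

# Placeholder values; only the key names are used by main(), create_skew_t(),
# and extract_sounding_data() in this module.
config = {
    'log_directory': '/path/to/logs',
    'log_filename': 'skew_t.log',
    'log_level': 'INFO',                       # DEBUG, INFO, WARNING, or ERROR
    'input_directory': '/path/to/tc_diag_output',
    'input_file_extension': '.dat',            # placeholder extension
    'output_directory': '/path/to/skew_t_plots',
}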
- input_dir = config['input_directory'] - file_ext = config['input_file_extension'] - files_of_interest = [] - - for root, dir, files in os.walk(input_dir): - for item in files: - if item.endswith(file_ext): - files_of_interest.append(os.path.join(root, item)) - # Create skew T diagrams for each input file. - for file_of_interest in files_of_interest: - create_skew_t(file_of_interest, config) - - except yaml.YAMLError as exc: - logger.error(f"YAMLError: {exc}") + # log_level == 'ERROR' + logging.basicConfig(filename=log_full_path, level=logging.ERROR, + format=format_str, + filemode='w') + + # Get the list of input files to visualize. + input_dir = config['input_directory'] + file_ext = config['input_file_extension'] + files_of_interest = [] + + for root, _, files in os.walk(input_dir): + for item in files: + if item.endswith(file_ext): + files_of_interest.append(os.path.join(root, item)) + # Create skew T diagrams for each input file. + for file_of_interest in files_of_interest: + create_skew_t(file_of_interest, config) if __name__ == "__main__": diff --git a/metplotpy/plots/taylor_diagram/taylor_diagram.py b/metplotpy/plots/taylor_diagram/taylor_diagram.py index f80a112c..6dfa5d02 100644 --- a/metplotpy/plots/taylor_diagram/taylor_diagram.py +++ b/metplotpy/plots/taylor_diagram/taylor_diagram.py @@ -22,14 +22,11 @@ import warnings from datetime import datetime import matplotlib.pyplot as plt -import numpy -import pandas from matplotlib.font_manager import FontProperties from matplotlib.projections import PolarAxes import mpl_toolkits.axisartist.floating_axes as fa import mpl_toolkits.axisartist.grid_finder as gf import numpy as np -import yaml import pandas as pd from metplotpy.plots import constants from metplotpy.plots.base_plot import BasePlot @@ -192,7 +189,7 @@ def _create_figure(self) -> None: rlocs = np.concatenate((-rlocs[:0:-1], rlocs)) # Convert to polar angles - tick_locations: numpy.array = np.arccos(rlocs) + tick_locations: np.array = np.arccos(rlocs) # positions gl1 = gf.FixedLocator(tick_locations) tf1 = gf.DictFormatter(dict(zip(tick_locations, map(str, rlocs)))) @@ -361,25 +358,12 @@ def main(config_filename=None): Returns: """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. 
- # with open("./custom_taylor_diagram.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs: dict = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) - + params = util.get_params(config_filename) try: - TaylorDiagram(docs) + TaylorDiagram(params) except ValueError as value_error: - logger = util.get_common_logger(docs['log_level'], docs['log_filename']) + logger = util.get_common_logger(params['log_level'], params['log_filename']) logger.error(f"ValueError {value_error}") diff --git a/metplotpy/plots/tcmpr_plots/box/tcmpr_point.py b/metplotpy/plots/tcmpr_plots/box/tcmpr_point.py index d9305ee7..44cab41b 100755 --- a/metplotpy/plots/tcmpr_plots/box/tcmpr_point.py +++ b/metplotpy/plots/tcmpr_plots/box/tcmpr_point.py @@ -1,37 +1,45 @@ import os +from datetime import datetime import plotly.graph_objects as go +from metplotpy.plots import util from metplotpy.plots.tcmpr_plots.box.tcmpr_box_point import TcmprBoxPoint from metplotpy.plots.tcmpr_plots.tcmpr_series import TcmprSeries class TcmprPoint(TcmprBoxPoint): - def __init__(self, config_obj, column_info, col, case_data, input_df, baseline_data): - super().__init__(config_obj, column_info, col, case_data, input_df, baseline_data) - print("--------------------------------------------------------") - print(f"Plotting POINT time series by {self.config_obj.series_val_names[0]}") + def __init__(self, config_obj, column_info, col, case_data, input_df, baseline_data, stat_name): + super().__init__(config_obj, column_info, col, case_data, input_df, baseline_data, stat_name) + # Set up Logging + self.point_logger = util.get_common_logger(self.config_obj.log_level, self.config_obj.log_filename) - self._adjust_titles() - self.series_list = self._create_series(self.input_df) + self.point_logger.info("--------------------------------------------------------") + self.point_logger.info(f"Plotting POINT time series by {self.config_obj.series_val_names[0]}") + start = datetime.now() + + self._adjust_titles(stat_name) + self.series_list = self._create_series(self.input_df, stat_name) self.case_data = None self.cur_baseline = baseline_data['cur_baseline'] self.cur_baseline_data = baseline_data['cur_baseline_data'] self._init_hfip_baseline_for_plot() if self.config_obj.prefix is None or len(self.config_obj.prefix) == 0: - self.plot_filename = f"{self.config_obj.plot_dir}{os.path.sep}{self.config_obj.list_stat_1[0]}_pointplot.png" + self.plot_filename = f"{self.config_obj.plot_dir}{os.path.sep}{stat_name}_pointplot.png" else: - self.plot_filename = f"{self.config_obj.plot_dir}{os.path.sep}{self.config_obj.prefix}_pointplot.png" + self.plot_filename = f"{self.config_obj.plot_dir}{os.path.sep}{self.config_obj.prefix}_{stat_name}_pointplot.png" + # remove the old file if it exists - # remove the old file if it exist if os.path.exists(self.plot_filename): os.remove(self.plot_filename) self._create_figure() - def _adjust_titles(self): + self.point_logger.info(f"Finished generating the TCMPR points in {datetime.now() - start} ms") + + def _adjust_titles(self, stat_name): if self.yaxis_1 is None or len(self.yaxis_1) == 0: - self.yaxis_1 = self.config_obj.list_stat_1[0] + '(' + self.col['units'] + ')' + self.yaxis_1 = stat_name + '(' + self.col['units'] + ')' if self.title is None or len(self.title) == 0: self.title = 'Point Plots of ' + self.col['desc'] + ' by ' \ @@ -57,28 +65,78 @@ def 
_draw_series(self, series: TcmprSeries) -> None: boxpoints = 'all' # create a trace - self.figure.add_trace( - go.Box(x=series.series_data['LEAD_HR'], - y=series.series_data['PLOT'], - mean=series.series_points['mean'], - notched=self.config_obj.box_notch, - line=line_color, - fillcolor=fillcolor, - name=series.user_legends, - showlegend=True, - # quartilemethod='linear', #"exclusive", "inclusive", or "linear" - boxmean=self.config_obj.box_avg, - boxpoints=boxpoints, # outliers, all, False - pointpos=0, - marker=dict(size=4, - color=marker_color, + + # line plot, when connect_points is False in config file + if 'point' in self.config_obj.plot_type_list: + if self.config_obj.connect_points: + # line plot + mode = 'lines+markers' + else: + # points only + mode = 'markers' + # Create a point plot + + # Ensure that the size of the list of x and y values + # are the same, or the resulting plot will be incorrect. + # This mismatch occurs when the x_list represents the + # available lead hours in the series data and the + # series_points has None where there isn't data corresponding + # to lead hours in the series_points dataframe. + # + y_list = series.series_points['mean'] + x_list = series.series_data['LEAD_HR'] + if len(x_list) != len(y_list): + # Clean up None values in the series.series_points['mean'] list + # The None values are assigned by the _create_series_points() method. + y_list = [y_values for y_values in y_list if y_values is not None] + + self.figure.add_trace( + go.Scatter(x=x_list, + y=y_list, + showlegend=True, + mode=mode, + name=self.config_obj.user_legends[series.idx], + marker=dict( + color=marker_line_color, + size=8, + opacity=0.7, line=dict( - width=1, - color=marker_line_color - ), - symbol=marker_symbol, - ), - jitter=0 - ), - secondary_y=series.y_axis != 1 - ) + color=self.config_obj.colors_list[series.idx], + width=1 + ) + ), + ), + secondary_y=series.y_axis != 1 + ) + + # When a line plot is requested, connect any gaps + if self.config_obj.connect_points: + self.figure.update_traces(connectgaps=True) + + else: + # Boxplot + self.figure.add_trace( + go.Box(x=series.series_data['LEAD_HR'], + y=series.series_data['PLOT'], + mean=series.series_points['mean'], + notched=self.config_obj.box_notch, + line=line_color, + fillcolor=fillcolor, + name=series.user_legends, + showlegend=True, + boxmean=self.config_obj.box_avg, + boxpoints=boxpoints, # outliers, all, False + pointpos=0, + marker=dict(size=4, + color=marker_color, + line=dict( + width=1, + color=marker_line_color + ), + symbol=marker_symbol, + ), + jitter=0 + ), + secondary_y=series.y_axis != 1 + ) + diff --git a/metplotpy/plots/tcmpr_plots/tcmpr.py b/metplotpy/plots/tcmpr_plots/tcmpr.py index 5ac4cc8d..9988e064 100755 --- a/metplotpy/plots/tcmpr_plots/tcmpr.py +++ b/metplotpy/plots/tcmpr_plots/tcmpr.py @@ -420,8 +420,11 @@ def save_to_file(self): # Create the directory for the output plot if it doesn't already exist dirname = os.path.dirname(os.path.abspath(self.plot_filename)) - if not os.path.exists(dirname): - os.mkdir(dirname) + try: + os.makedirs(dirname, exist_ok=True) + except FileExistsError: + pass + self.logger.info(f'Saving the image file: {self.plot_filename}') if self.figure: try: @@ -511,8 +514,6 @@ def perform_event_equalization(input_df:pd.DataFrame, is_skill:bool, config_obj: return output_data - - def main(config_filename=None): """ Generates a sample, default, TCMPR plot using a combination of @@ -522,18 +523,7 @@ def main(config_filename=None): Args: @param config_filename: default is None, the 
name of the custom config file to apply """ - - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) + docs = util.get_params(config_filename) # Determine location of the default YAML config files and then # read defaults stored in YAML formatted file into the dictionary @@ -542,17 +532,31 @@ def main(config_filename=None): else: location = os.path.realpath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config')) - with open(os.path.join(location, "tcmpr_defaults.yaml"), 'r') as stream: - try: - defaults = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) + defaults = util.get_params(os.path.join(location, "tcmpr_defaults.yaml")) # merge user defined parameters into defaults if they exist docs = {**defaults, **docs} config_obj = TcmprConfig(docs) + # Create the requested plot(s) + create_plot(config_obj) + + +def create_plot(config_obj: dict) -> None: + """ + One or more TCMPR plots is generated. Event equalization is performed if + it was requested by a setting in the yaml configuration file. + + Args: + @param config_obj: The config object containing all the necessary information obtained + from the yaml configuration file. + + Returns: None, creates one or more plots as specified in the yaml config file + """ + + # Find input files, they must have the .tcst extension and filename must have + # the prefix "tc_pairs" (e.g. tc_pairs_gfso_20220401.tcst) tcst_files = [] # list all .tcst files in tcst_dir if config_obj.tcst_dir is not None and len(config_obj.tcst_dir) > 0 and os.path.exists(config_obj.tcst_dir): @@ -566,7 +570,9 @@ def main(config_filename=None): input_df = orig_input_df.copy(deep=True) # Define a demo and retro column - # TODO these values never get used comment out for now + + # Note: Currently not supported, leave commented out for now. 
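The point-plot and TC-DIAG branches added above are switched on by two booleans that TcmprConfig now reads via _get_bool() (see the tcmpr_config.py hunk below). A hedged sketch of the corresponding entries in the parameters dictionary; only the key names is_tcdiag_linetype and connect_points come from this change, everything else is illustrative:

# Illustrative excerpt of the parameters handed to TcmprConfig.
params = {
    'connect_points': 'True',       # 'True' -> point plots use mode 'lines+markers' and connectgaps=True
    'is_tcdiag_linetype': 'False',  # 'True' -> .tcst input read as tab-separated, VALID used as-is
    # ... remaining TCMPR settings (plot types, filters, output paths, etc.)
}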
+ # input_df = orig_input_df.copy(deep=True) # if config_obj.demo_yr is not None and config_obj.demo_yr != 'NA': # demo_yr_obj = datetime.strptime(str(config_obj.demo_yr), '%Y') @@ -579,6 +585,7 @@ quotechar='"', skipinitialspace=True, encoding='utf-8') logger = util.get_common_logger(config_obj.log_level, config_obj.log_filename) for plot_type in config_obj.plot_type_list: # Apply event equalization, if requested @@ -586,11 +593,11 @@ is_skill = False if config_obj.use_ee: if plot_type == 'skill_mn' or plot_type == 'skill_md': - is_skill = True - # perform event equalization on the skill_mn|skill_md plot type - logger.info(f"Perform event equalization for {plot_type}: {datetime.now()}") - output_result = perform_event_equalization(orig_input_df, is_skill, config_obj) - input_df = output_result + is_skill = True + # perform event equalization on the skill_mn|skill_md plot type + logger.info(f"Perform event equalization for {plot_type}: {datetime.now()}") + output_result = perform_event_equalization(orig_input_df, is_skill, config_obj) + input_df = output_result else: logger.info(f"Perform event equalization for {plot_type}: {datetime.now()}") output_result = perform_event_equalization(orig_input_df, is_skill, config_obj) @@ -641,7 +648,7 @@ elif plot_type == 'skill_mn': from metplotpy.plots.tcmpr_plots.skill.mean.tcmpr_skill_mean import TcmprSkillMean plot = TcmprSkillMean(config_obj, column_info, col_to_plot, common_case_data, input_df, - cur_stat, baseline_data) + cur_stat, baseline_data) elif plot_type == 'skill_md': from metplotpy.plots.tcmpr_plots.skill.median.tcmpr_skill_median import TcmprSkillMedian plot = TcmprSkillMedian(config_obj, column_info, col_to_plot, common_case_data, input_df, cur_stat) @@ -683,7 +690,10 @@ def read_tcst_files(config_obj, tcst_files): for file in tcst_files: if os.path.exists(file): print(f'Reading track data:{file}') - file_df = pd.read_csv(file, sep=r'\s+|;|:', header='infer', engine="python") + if config_obj.is_tcdiag: + file_df = pd.read_csv(file, sep='\t') + else: + file_df = pd.read_csv(file, sep=r'\s+|;|:', header='infer', engine="python") file_df['LEAD_HR'] = file_df['LEAD'] / 10000 file_df['LEAD_HR'] = file_df['LEAD_HR'].astype('int') all_filters = [] @@ -704,7 +714,10 @@ # use numpy to select the rows where any record evaluates to True mask = np.array(all_filters).all(axis=0) - file_df['VALID_TIME'] = pd.to_datetime(file_df['VALID'], format='%Y%m%d_%H%M%S') # 20170417_060000 + if config_obj.is_tcdiag: + file_df['VALID_TIME'] = file_df['VALID'] + else: + file_df['VALID_TIME'] = pd.to_datetime(file_df['VALID'], format='%Y%m%d_%H%M%S') # 20170417_060000 # Define a case column file_df['equalize'] = file_df.loc[:, 'BMODEL'].astype(str) \ + ':' + file_df.loc[:, 'STORM_ID'].astype(str) \ diff --git a/metplotpy/plots/tcmpr_plots/tcmpr_config.py b/metplotpy/plots/tcmpr_plots/tcmpr_config.py index d8baa869..0d75e358 100755 --- a/metplotpy/plots/tcmpr_plots/tcmpr_config.py +++ b/metplotpy/plots/tcmpr_plots/tcmpr_config.py @@ -28,6 +28,7 @@ class TcmprConfig(Config): """ Prepares and organises Line plot parameters """ SUPPORTED_PLOT_TYPES = ['boxplot', 'point', 'mean', 'median', 'relperf', 'rank', 'skill_mn', 'skill_md'] + def __init__(self, parameters: dict) -> None: """ Reads in the plot settings from a box plot config file.
@@ -37,6 +38,9 @@ def __init__(self, parameters: dict) -> None: """ super().__init__(parameters) + self.is_tcdiag = self._get_bool('is_tcdiag_linetype') + self.connect_points = self._get_bool('connect_points') + # Logging self.log_filename = self.get_config_value('log_filename') self.log_level = self.get_config_value('log_level') @@ -251,7 +255,8 @@ def _get_hfip_bsln(self) -> str: """ hfip_bsln = str(self.get_config_value('hfip_bsln')) - hfip_bsln = hfip_bsln.lower() + hfip_bsln_lower = hfip_bsln.lower() + # Validate that hfip_bsln is one of the following; (no, 0, 5, 10 year goal) supported_bsln = ['no', '0', '5', '10'] diff --git a/metplotpy/plots/util.py b/metplotpy/plots/util.py index 3d531abb..dc024000 100644 --- a/metplotpy/plots/util.py +++ b/metplotpy/plots/util.py @@ -13,12 +13,12 @@ __author__ = 'Minna Win' import argparse -from typing import Tuple import sys -import getpass +import os import logging import gc import re +from datetime import datetime import matplotlib import numpy as np from typing import Union @@ -27,6 +27,7 @@ from metplotpy.plots.context_filter import ContextFilter as cf import metcalcpy.util.pstd_statistics as pstats import metcalcpy.util.ctc_statistics as cstats +from metcalcpy.util.read_env_vars_in_config import parse_config COLORSCALES = { 'green_red': ['#E6FFE2', '#B3FAAD', '#74F578', '#30D244', '#00A01E', '#F6A1A2', @@ -73,6 +74,43 @@ def read_config_from_command_line(): return args.Path +def get_params(config_filename): + """!Read config_filename or get config file from command line, then parse + config file and return it as a dictionary. + + @param config_filename The full path to the config file or None + @returns dictionary containing parameters for plot + """ + config_file = config_filename if config_filename else read_config_from_command_line() + return parse_config(config_file) + + +def make_plot(config_filename, plot_class): + """!Get plot parameters and create the plot. + + @param config_filename The full path to the config or None + @param plot_class class of plot to produce, e.g. Bar or Box + @returns plot class object or None if something went wrong + """ + # Retrieve the contents of the custom config file to over-ride + # or augment settings defined by the default config file. + params = get_params(config_filename) + try: + plot = plot_class(params) + plot.save_to_file() + #if plot.config_obj.show_in_browser: + # plot.show_in_browser() + plot.write_html() + plot.write_output_file() + name = plot_class.__name__ if not hasattr(plot_class, 'LONG_NAME') else plot_class.LONG_NAME + plot.logger.info(f"Finished {name} plot at {datetime.now()}") + return plot + except ValueError as val_er: + print(val_er) + + return None + + def alpha_blending(hex_color: str, alpha: float) -> str: """ Alpha color blending as if on the white background. Useful for gridlines @@ -316,6 +354,13 @@ def get_common_logger(log_level, log_filename): currently in use by a plot type. ''' + # If directory for logfile doesn't exist, create it + log_dir = os.path.dirname(log_filename) + try: + os.makedirs(log_dir, exist_ok=True) + except OSError: + pass + # Supported log levels. 
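The get_params() and make_plot() helpers added to util.py above centralize the boilerplate that each plot module's main() used to repeat. A minimal sketch of how a plot module now delegates to them, mirroring the line.py change earlier in this diff:

from metplotpy.plots import util
from metplotpy.plots.line.line import Line


def main(config_filename=None):
    # util.get_params() resolves the YAML config (including !ENV environment
    # variable references) and util.make_plot() builds, saves, and logs the plot.
    util.make_plot(config_filename, Line)


if __name__ == "__main__":
    main()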
log_level = log_level.upper() log_levels = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, @@ -338,6 +383,7 @@ def get_common_logger(log_level, log_filename): datefmt='%Y-%m-%d %H:%M:%S', filename=log_filename, filemode='w') + logging.getLogger(name='matplotlib').setLevel(logging.CRITICAL) common_logger = logging.getLogger(__name__) f = cf() common_logger.addFilter(f) diff --git a/metplotpy/plots/wind_rose/wind_rose.py b/metplotpy/plots/wind_rose/wind_rose.py index 895d84a4..cc4c9f9f 100644 --- a/metplotpy/plots/wind_rose/wind_rose.py +++ b/metplotpy/plots/wind_rose/wind_rose.py @@ -20,7 +20,6 @@ from typing import Union import pandas as pd import numpy as np -import yaml import re from pathlib import Path @@ -410,25 +409,14 @@ def main(config_filename=None): The location of the input data is defined in either the default or custom config file. """ + params = util.get_params(config_filename) - # Retrieve the contents of the custom config file to over-ride - # or augment settings defined by the default config file. - # with open("./mpr_plot_custom.yaml", 'r') as stream: - if not config_filename: - config_file = util.read_config_from_command_line() - else: - config_file = config_filename - with open(config_file, 'r') as stream: - try: - docs = yaml.load(stream, Loader=yaml.FullLoader) - except yaml.YAMLError as exc: - print(exc) # point to data file in the test dir - if 'stat_input' not in docs: - docs['stat_input'] = str(Path(__file__).parent.parent.parent.parent) + '/test/wind_rose/point_stat_mpr.txt' + if 'stat_input' not in params: + params['stat_input'] = str(Path(__file__).parent.parent.parent.parent) + '/test/wind_rose/point_stat_mpr.txt' try: - plot = WindRosePlot(docs) + plot = WindRosePlot(params) plot.save_to_file() if plot.config_obj.show_in_browser: plot.show_in_browser() diff --git a/requirements.txt b/requirements.txt index e8cc6ad2..6a6f2d44 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,4 +16,4 @@ scipy>=1.11.1 xarray==2023.1.0 eofs==1.4.0 #cartopy==0.21.1 -scikit-learn==1.2.2 +scikit-learn==1.5.0 diff --git a/test/bar/custom_bar.yaml b/test/bar/custom_bar.yaml index 755b219f..3361c2c7 100644 --- a/test/bar/custom_bar.yaml +++ b/test/bar/custom_bar.yaml @@ -135,8 +135,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./bar.data -plot_filename: ./bar.png +stat_input: !ENV '${TEST_DIR}/bar.data' +plot_filename: !ENV '${TEST_DIR}/bar.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. 
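The test configs in this diff now use the METcalcpy !ENV syntax (for example, stat_input: !ENV '${TEST_DIR}/bar.data'), which util.get_params() resolves through parse_config(). A small usage sketch; the paths are illustrative:

import os
from metplotpy.plots import util

# TEST_DIR must be set before parsing so the !ENV '${TEST_DIR}/...' values
# can be expanded.
os.environ['TEST_DIR'] = '/path/to/METplotpy/test/bar'

params = util.get_params('/path/to/METplotpy/test/bar/custom_bar.yaml')
print(params['stat_input'])  # expected: /path/to/METplotpy/test/bar/bar.data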
diff --git a/test/bar/custom_defaultpoints1_bar.yaml b/test/bar/custom_defaultpoints1_bar.yaml index 87991b2b..ba734352 100644 --- a/test/bar/custom_defaultpoints1_bar.yaml +++ b/test/bar/custom_defaultpoints1_bar.yaml @@ -130,8 +130,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./bar.data -plot_filename: ./bar_defaultpoints1.png +stat_input: !ENV '${TEST_DIR}/bar.data' +plot_filename: !ENV '${TEST_DIR}/bar_defaultpoints1.png' show_legend: -True -True \ No newline at end of file diff --git a/test/bar/custom_points1_bar.yaml b/test/bar/custom_points1_bar.yaml index 7921e801..bfc23987 100644 --- a/test/bar/custom_points1_bar.yaml +++ b/test/bar/custom_points1_bar.yaml @@ -15,7 +15,7 @@ con_series: create_html: 'False' derived_series_1: [] dump_points_1: 'True' -points_path: './intermed_files' +points_path: !ENV '${TEST_DIR}/intermed_files' eqbound_high: 0.001 eqbound_low: -0.001 event_equal: 'False' @@ -129,8 +129,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./bar.data -plot_filename: ./bar_points1.png +stat_input: !ENV '${TEST_DIR}/bar.data' +plot_filename: !ENV '${TEST_DIR}/bar_points1.png' show_legend: -True diff --git a/test/bar/intermed_files/nan.points1 b/test/bar/nan.points1 similarity index 100% rename from test/bar/intermed_files/nan.points1 rename to test/bar/nan.points1 diff --git a/test/bar/test_bar.py b/test/bar/test_bar.py index 18eb9fae..eb0c5fee 100644 --- a/test/bar/test_bar.py +++ b/test/bar/test_bar.py @@ -5,60 +5,46 @@ from metplotpy.plots.bar import bar # from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['bar.png', 'bar.points1'] + @pytest.fixture -def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_bar.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_bar.yaml" # Invoke the command to generate a Bar plot based on # the custom_bar.yaml custom config file. bar.main(custom_config_filename) + @pytest.fixture -def setup_nones(): +def setup_nones(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "bar_with_nones.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/bar_with_nones.yaml" # Invoke the command to generate a Bar plot based on # the custom_bar.yaml custom config file. bar.main(custom_config_filename) -def cleanup(): - # remove the bbar.png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'bar.png' - points_file_1 = 'bar.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - - -@pytest.mark.parametrize("test_input, expected", - (["./bar_expected.png", True], ["./bar.png", True], ["./bar.points1", True])) -def test_files_exist(setup, test_input, expected): +def test_files_exist(setup, remove_files): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected - cleanup() + check_files = ('bar.png', 'bar.points1') + for test_input in check_files: + print(f'Checking if {cwd}/{test_input} is found') + assert os.path.isfile(f"{cwd}/{test_input}") + remove_files(cwd, check_files) -def test_no_nans_in_points_file(setup): +def test_no_nans_in_points_file(setup, remove_files): """ Checking that the points1 intermediate file does not have any NaN's. This is indicative of a problem with the _create_series_points() method. @@ -68,35 +54,33 @@ def test_no_nans_in_points_file(setup): # Fail if there are any NaN's-this indicates something went wrong with the # line_series.py module's _create_series_points() method. nans_found = False - with open("./bar.points1", "r") as f: + with open(f"{cwd}/bar.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True - assert nans_found == False - cleanup() + assert not nans_found # Verify that the nan.points1 file does indeed trigger a "nans_found" - with open("./intermed_files/nan.points1", "r") as f: + with open(f"{cwd}/nan.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True - # assert - assert nans_found == True + assert nans_found + remove_files(cwd, CLEANUP_FILES) @pytest.mark.skip("fails on linux host machines") -def test_images_match(setup): +def test_images_match(setup, remove_files): """ Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./bar_expected.png', './bar.png') + comparison = CompareImages(f'{cwd}/bar_expected.png', f'{cwd}/bar.png') assert comparison.mssim == 1 - - cleanup() + remove_files(cwd, CLEANUP_FILES) @pytest.mark.skip("fails on linux host machines") @@ -106,108 +90,72 @@ def test_none_data_images_match(setup_nones): newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('expected_with_nones.png', './bar_with_nones.png') + comparison = CompareImages(f'{cwd}/expected_with_nones.png', f'{cwd}/bar_with_nones.png') assert comparison.mssim == 1 try: - path = os.getcwd() - plot_file = 'bar_with_nones.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
+ plot_file = f'{cwd}/bar_with_nones.png' + os.remove(plot_file) + except OSError: pass -@pytest.mark.parametrize("test_input, expected", - (["./bar_points1.png", True], ["./intermed_files/bar.points1", True])) -def test_point_and_plot_files_exist(test_input, expected): +def test_point_and_plot_files_exist(setup_env, remove_files): """ Checking that the plot and (specified location) intermediate file are getting created """ - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_points1_bar.yaml" - intermed_dir = os.path.join(os.getcwd(), 'intermed_files') + check_files = ("bar_points1.png", "intermed_files/bar.points1") + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_points1_bar.yaml" + intermed_dir = os.path.join(cwd, 'intermed_files') try: os.mkdir(intermed_dir) - except FileExistsError as e: + except FileExistsError: pass # Invoke the command to generate a Bar plot based on # the custom_bar.yaml custom config file. bar.main(custom_config_filename) - assert os.path.isfile(test_input) == expected - # remove the .png and .points files - try: - path = os.getcwd() - plot_file = 'bar_points1.png' - points_file_1 = 'bar.points1' - subdir = os.path.join(path, 'intermed_files') - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(intermed_dir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + for test_input in check_files: + assert os.path.isfile(f"{cwd}/{test_input}") + remove_files(cwd, check_files) -@pytest.mark.parametrize("test_input, expected", - (["./bar_defaultpoints1.png", True], ["./bar.points1", True])) -def test_point_and_plot_files_exist(test_input, expected): +def test_point_and_plot_files_exist_default(setup_env, remove_files): """ Checking that the plot and (specified location) intermediate file are getting created """ - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_defaultpoints1_bar.yaml" + check_files = ("bar_defaultpoints1.png", "bar.points1") + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_defaultpoints1_bar.yaml" # Invoke the command to generate a Bar plot based on # the custom_bar.yaml custom config file. bar.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + for test_input in check_files: + assert os.path.isfile(f"{cwd}/{test_input}") # remove the .png and .points files - try: - path = os.getcwd() - plot_file = 'bar_defaultpoints1.png' - points_file_1 = 'bar.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - + remove_files(cwd, check_files) @pytest.mark.skip("fails on linux host machines") -def test_threshold_plotting(): +def test_threshold_plotting(setup_env, remove_files): """ Verify that the bar plot using data with thresholds is correct. """ - # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "threshold_bar.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/threshold_bar.yaml" # Invoke the command to generate a Bar plot based on # the custom_bar.yaml custom config file. 
bar.main(custom_config_filename) - comparison = CompareImages('./expected_threshold.png', './threshold_bar.png') + comparison = CompareImages(f'{cwd}/expected_threshold.png', f'{cwd}/threshold_bar.png') assert comparison.mssim == 1 - try: - path = os.getcwd() - plot_file = 'threshold_bar.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - + remove_files(cwd, ['threshold_bar.png']) diff --git a/test/bar/threshold_bar.yaml b/test/bar/threshold_bar.yaml index 1cee538e..36efd591 100644 --- a/test/bar/threshold_bar.yaml +++ b/test/bar/threshold_bar.yaml @@ -84,7 +84,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: threshold_bar.png +plot_filename: !ENV '${TEST_DIR}/threshold_bar.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -112,7 +112,7 @@ show_nstats: 'False' show_signif: - 'False' start_from_zero: 'False' -stat_input: threshold_bar.data +stat_input: !ENV '${TEST_DIR}/threshold_bar.data' sync_yaxes: 'False' title: MET_HYSPLIT_DUST_CSI_Verification_DAY2_12Z_202202 - G246 title_align: 0.5 diff --git a/test/box/custom_box.yaml b/test/box/custom_box.yaml index 0952747f..bd2a1b61 100644 --- a/test/box/custom_box.yaml +++ b/test/box/custom_box.yaml @@ -180,8 +180,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./box.data -plot_filename: ./box.png +stat_input: !ENV '${TEST_DIR}/box.data' +plot_filename: !ENV '${TEST_DIR}/box.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. diff --git a/test/box/custom_box_defaultpoints1.yaml b/test/box/custom_box_defaultpoints1.yaml index d746a38b..59720457 100644 --- a/test/box/custom_box_defaultpoints1.yaml +++ b/test/box/custom_box_defaultpoints1.yaml @@ -174,8 +174,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./box.data -plot_filename: ./box_defaultpoints1.png +stat_input: !ENV '${TEST_DIR}/box.data' +plot_filename: !ENV '${TEST_DIR}/box_defaultpoints1.png' show_legend: -True -True diff --git a/test/box/custom_box_points1.yaml b/test/box/custom_box_points1.yaml index d5962f1c..c6be626e 100644 --- a/test/box/custom_box_points1.yaml +++ b/test/box/custom_box_points1.yaml @@ -101,7 +101,7 @@ plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: './intermed_files' +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null @@ -173,8 +173,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./box.data -plot_filename: ./box_points1.png +stat_input: !ENV '${TEST_DIR}/box.data' +plot_filename: !ENV '${TEST_DIR}/box_points1.png' show_legend: -True -True diff --git a/test/box/simple_box.yaml b/test/box/simple_box.yaml index 7bccefaf..c1fc33aa 100644 --- a/test/box/simple_box.yaml +++ b/test/box/simple_box.yaml @@ -112,8 +112,8 @@ title: Simple Example-Box xaxis: FCST_LEAD xaxis_reverse: 'False' -stat_input: ./box.data -plot_filename: ./default_box.png +stat_input: !ENV '${TEST_DIR}/box.data' +plot_filename: !ENV '${TEST_DIR}/default_box.png' show_legend: -True diff --git a/test/box/test_box.py b/test/box/test_box.py index f6fd6fb5..25ab21c4 100644 --- a/test/box/test_box.py +++ b/test/box/test_box.py @@ -3,46 +3,30 @@ from metplotpy.plots.box import box #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['box.png', 'box.points1'] @pytest.fixture 
-def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - #os.environ['METPLOTPY_BASE'] = "../../metplotpy" - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_box.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_box.yaml" # Invoke the command to generate a Performance Diagram based on # the test_custom_performance_diagram.yaml custom config file. box.main(custom_config_filename) -def cleanup(): - # remove the box.png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'box.png' - points_file_1 = 'box.points1' - os.remove(os.path.join(path, points_file_1)) - os.remove(os.path.join(path, plot_file)) - - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - - @pytest.mark.parametrize("test_input, expected", - (["./box.png", True],["./box.points1", True])) -def test_files_exist(setup, test_input, expected): + (["box.png", True], ["box.points1", True])) +def test_files_exist(setup, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("fails on linux hosts") def test_images_match(setup): @@ -51,78 +35,57 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./box_expected.png', './box.png') + comparison = CompareImages(f'{cwd}/box_expected.png', f'{cwd}/box.png') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input, expected", - (["./box_expected.png", True],["./box_points1.png", True],["./intermed_files/box.points1", True])) -def test_points1_file_exist(test_input, expected): + (["box_expected.png", True], ["box_points1.png", True], ["intermed_files/box.points1", True])) +def test_points1_file_exist(setup_env, test_input, expected, remove_files): """ Checking that the plot is created and points1 output files is created where specified in the custom_box_points1.yaml file """ - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_box_points1.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_box_points1.yaml" try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a box plot based on # the custom_box_points1.yaml custom config file. box.main(custom_config_filename) - assert os.path.isfile(test_input) == expected - try: - path = os.getcwd() - plot_file = 'box_points1.png' - points_file_1 = 'box.points1' - subdir = os.path.join(path, './intermed_files') - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, ['box_points1.png']) @pytest.mark.parametrize("test_input, expected", - (["./box_defaultpoints1.png", True],["./box.points1", True])) -def test_defaultpoints1_file_exist(test_input, expected): + (["box_defaultpoints1.png", True], ["box.points1", True])) +def test_defaultpoints1_file_exist(setup_env, test_input, expected, remove_files): """ Checking that the plot is created and points1 output files is created in the default location (i.e. the current working dir, where the box.data file resides) """ - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_box_defaultpoints1.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_box_defaultpoints1.yaml" # Invoke the command to generate a box plot based on # the custom_box_defaultpoints1.yaml custom config file. box.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected # remove the created plot and intermediate .points1 file - try: - path = os.getcwd() - plot_file = 'box_defaultpoints1.png' - points_file_1 = 'box.points1' - os.remove(os.path.join(path, points_file_1)) - os.remove(os.path.join(path, plot_file)) - - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + remove_files(cwd, ['box_defaultpoints1.png', 'box.points1']) -def test_no_nans_in_points_file(setup): +def test_no_nans_in_points_file(setup, remove_files): """ Checking that the points1 file does not contain NaN's """ - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_box_defaultpoints1.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + custom_config_filename = f"{cwd}/custom_box_defaultpoints1.yaml" # Invoke the command to generate a box plot based on # the custom_box_defaultpoints1.yaml custom config file. @@ -132,33 +95,22 @@ def test_no_nans_in_points_file(setup): # Fail if there are any NaN's-this indicates something went wrong with the # line_series.py module's _create_series_points() method. nans_found = False - with open("./box.points1", "r") as f: + with open(f"{cwd}/box.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True - assert nans_found == False - cleanup() + assert not nans_found + remove_files(cwd, CLEANUP_FILES) # Verify that the nan.points1 file does indeed trigger a "nans_found" - with open("./nan.points1", "r") as f: + with open(f"{cwd}/nan.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True # assert - assert nans_found == True + assert nans_found # remove the created plot and intermediate .points1 file - try: - path = os.getcwd() - plot_file = 'box_defaultpoints1.png' - points_file_1 = 'box.points1' - os.remove(os.path.join(path, points_file_1)) - os.remove(os.path.join(path, plot_file)) - - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - + remove_files(cwd, ['box_defaultpoints1.png', 'box.points1']) diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 00000000..c234f017 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,33 @@ +import pytest +import os +import shutil + + +@pytest.fixture +def setup_env(): + def set_environ(test_dir): + print("Setting up environment") + os.environ['METPLOTPY_BASE'] = f"{test_dir}/../../" + os.environ['TEST_DIR'] = test_dir + return set_environ + + +@pytest.fixture() +def remove_files(): + def remove_the_files(test_dir, file_list): + print("Removing the files") + # loop over list of files under test_dir and remove them + for file in file_list: + try: + os.remove(os.path.join(test_dir, file)) + except OSError: + pass + + # also remove intermed_files directory if it exists + print("Removing intermed_files directory if it exists") + try: + shutil.rmtree(f"{test_dir}/intermed_files") + except FileNotFoundError: + pass + + return remove_the_files diff --git a/test/contour/custom_contour.yaml b/test/contour/custom_contour.yaml index c6d08fb2..b53e5a71 100644 --- a/test/contour/custom_contour.yaml +++ b/test/contour/custom_contour.yaml @@ -150,8 +150,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./contour.data -plot_filename: ./contour.png +stat_input: !ENV '${TEST_DIR}/contour.data' +plot_filename: !ENV '${TEST_DIR}/contour.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. diff --git a/test/contour/test_contour.py b/test/contour/test_contour.py index 6a8d6e91..7aff0c56 100644 --- a/test/contour/test_contour.py +++ b/test/contour/test_contour.py @@ -3,6 +3,7 @@ from metplotpy.plots.contour import contour #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) @pytest.fixture def setup(): @@ -10,23 +11,21 @@ def setup(): cleanup() # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd # Invoke the command to generate a contour plot based on # the config yaml files. - contour.main("custom_contour.yaml") + contour.main(f"{cwd}/custom_contour.yaml") def cleanup(): # remove the previously created files try: - path = os.getcwd() plot_file = 'contour.png' - os.remove(os.path.join(path, plot_file)) - - + os.remove(os.path.join(cwd, plot_file)) except OSError as e: # Typically, when files have already been removed or # don't exist. Ignore. @@ -34,7 +33,7 @@ def cleanup(): @pytest.mark.parametrize("test_input, expected", - (["./contour_expected.png", True], ["./contour.png", True] + ([f"{cwd}/contour_expected.png", True], [f"{cwd}/contour.png", True] )) def test_files_exist(setup, test_input, expected): """ @@ -50,6 +49,6 @@ def test_images_match(setup): newly created plots to verify that the plot hasn't changed in appearance. 
""" - comparison = CompareImages('./contour_expected.png', './contour.png') + comparison = CompareImages(f'{cwd}/contour_expected.png', f'{cwd}/contour.png') assert comparison.mssim == 1 cleanup() diff --git a/test/difficulty_index/test_difficulty_index_plotting.py b/test/difficulty_index/test_difficulty_index_plotting.py index a6f9c9ae..5dfe50c8 100755 --- a/test/difficulty_index/test_difficulty_index_plotting.py +++ b/test/difficulty_index/test_difficulty_index_plotting.py @@ -1,12 +1,15 @@ +import os from . import example_difficulty_index as edi +cwd = os.path.dirname(__file__) + def test_difficulty_index_plot(): """ Compare difficulty index values to ensure correctness. """ - file1 = 'swh_North_Pacific_5dy_ensemble.npz' + file1 = f'{cwd}/swh_North_Pacific_5dy_ensemble.npz' lats, lons, fieldijn = edi.load_data(file1) muij, sigmaij = edi.compute_stats(fieldijn) @@ -16,5 +19,6 @@ def test_difficulty_index_plot(): assert 9.475065612792969 == muij[25][100] + if __name__ == "__main__": - test_difficulty_index_plot() + test_difficulty_index_plot() diff --git a/test/eclv/custom_eclv.yaml b/test/eclv/custom_eclv.yaml index aa40457a..18ddd739 100644 --- a/test/eclv/custom_eclv.yaml +++ b/test/eclv/custom_eclv.yaml @@ -112,8 +112,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./eclv.data -plot_filename: ./eclv.png +stat_input: !ENV '${TEST_DIR}/eclv.data' +plot_filename: !ENV '${TEST_DIR}/eclv.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. diff --git a/test/eclv/custom_eclv_ctc.yaml b/test/eclv/custom_eclv_ctc.yaml index 7cadc20c..80d7c731 100644 --- a/test/eclv/custom_eclv_ctc.yaml +++ b/test/eclv/custom_eclv_ctc.yaml @@ -111,8 +111,8 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./eclv_ctc.data -plot_filename: ./eclv_ctc.png +stat_input: !ENV '${TEST_DIR}/eclv_ctc.data' +plot_filename: !ENV '${TEST_DIR}/eclv_ctc.png' show_legend: -True diff --git a/test/eclv/custom_eclv_pct.yaml b/test/eclv/custom_eclv_pct.yaml index b41331c8..bc6e3a4a 100644 --- a/test/eclv/custom_eclv_pct.yaml +++ b/test/eclv/custom_eclv_pct.yaml @@ -102,7 +102,7 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -stat_input: ./eclv_pct.data -plot_filename: ./eclv_pct.png +stat_input: !ENV '${TEST_DIR}/eclv_pct.data' +plot_filename: !ENV '${TEST_DIR}/eclv_pct.png' show_legend: -True \ No newline at end of file diff --git a/test/eclv/test_eclv.py b/test/eclv/test_eclv.py index 9dd8cda9..37bf24b6 100644 --- a/test/eclv/test_eclv.py +++ b/test/eclv/test_eclv.py @@ -3,6 +3,7 @@ from metplotpy.plots.eclv import eclv #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) @pytest.fixture def setup(): @@ -10,26 +11,26 @@ def setup(): cleanup() # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd # Invoke the command to generate a ECLV plots based on # the config yaml files. 
- eclv.main("custom_eclv_pct.yaml") - eclv.main("custom_eclv.yaml") - eclv.main("custom_eclv_ctc.yaml") + eclv.main(f"{cwd}/custom_eclv_pct.yaml") + eclv.main(f"{cwd}/custom_eclv.yaml") + eclv.main(f"{cwd}/custom_eclv_ctc.yaml") def cleanup(): # remove the previously created files try: - path = os.getcwd() plot_file = 'eclv_pct.png' - os.remove(os.path.join(path, plot_file)) + os.remove(os.path.join(cwd, plot_file)) plot_file = 'eclv.png' - os.remove(os.path.join(path, plot_file)) + os.remove(os.path.join(cwd, plot_file)) plot_file = 'eclv_ctc.png' - os.remove(os.path.join(path, plot_file)) + os.remove(os.path.join(cwd, plot_file)) except OSError as e: # Typically, when files have already been removed or @@ -38,9 +39,9 @@ def cleanup(): @pytest.mark.parametrize("test_input, expected", - (["./eclv_pct_expected.png", True], ["./eclv_pct.png", True], - ["./eclv_ctc_expected.png", True], ["./eclv_ctc.png", True], - ["./eclv_expected.png", True], ["./eclv.png", True])) + ([f"{cwd}/eclv_pct_expected.png", True], [f"{cwd}/eclv_pct.png", True], + [f"{cwd}/eclv_ctc_expected.png", True], [f"{cwd}/eclv_ctc.png", True], + [f"{cwd}/eclv_expected.png", True], [f"{cwd}/eclv.png", True])) def test_files_exist(setup, test_input, expected): """ Checking that the plot files are getting created @@ -55,12 +56,12 @@ def test_images_match(setup): newly created plots to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./eclv_pct_expected.png', './eclv_pct.png') + comparison = CompareImages(f'{cwd}/eclv_pct_expected.png', './eclv_pct.png') assert comparison.mssim == 1 - comparison = CompareImages('./eclv_expected.png', './eclv.png') + comparison = CompareImages(f'{cwd}/eclv_expected.png', './eclv.png') assert comparison.mssim == 1 - comparison = CompareImages('./eclv_ctc_expected.png', './eclv_ctc.png') + comparison = CompareImages(f'{cwd}/eclv_ctc_expected.png', './eclv_ctc.png') assert comparison.mssim == 1 cleanup() diff --git a/test/ens_ss/custom2_ens_ss.yaml b/test/ens_ss/custom2_ens_ss.yaml index b9be350d..a01d1cbd 100644 --- a/test/ens_ss/custom2_ens_ss.yaml +++ b/test/ens_ss/custom2_ens_ss.yaml @@ -112,9 +112,9 @@ y2tlab_orient: 1 y2tlab_perp: 1 y2tlab_size: 1.0 -stat_input: ./ens_ss.data -plot_filename: ./intermed_files/ens_ss.png -points_path: ./intermed_files +stat_input: !ENV '${TEST_DIR}/ens_ss.data' +plot_filename: !ENV '${TEST_DIR}/intermed_files/ens_ss.png' +points_path: !ENV '${TEST_DIR}/intermed_files' show_legend: -True -True \ No newline at end of file diff --git a/test/ens_ss/custom_ens_ss.yaml b/test/ens_ss/custom_ens_ss.yaml index 231d2b79..3bfe0585 100644 --- a/test/ens_ss/custom_ens_ss.yaml +++ b/test/ens_ss/custom_ens_ss.yaml @@ -119,8 +119,8 @@ y2tlab_orient: 1 y2tlab_perp: 1 y2tlab_size: 1.0 -stat_input: ./ens_ss.data -plot_filename: ./ens_ss.png +stat_input: !ENV '${TEST_DIR}/ens_ss.data' +plot_filename: !ENV '${TEST_DIR}/ens_ss.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. 
diff --git a/test/ens_ss/test_ens_ss.py b/test/ens_ss/test_ens_ss.py index d4d85018..9962fc56 100644 --- a/test/ens_ss/test_ens_ss.py +++ b/test/ens_ss/test_ens_ss.py @@ -3,6 +3,7 @@ #from metcalcpy.compare_images import CompareImages from metplotpy.plots.ens_ss import ens_ss +cwd = os.path.dirname(__file__) @pytest.fixture def setup(): @@ -10,8 +11,9 @@ def setup(): cleanup() # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_ens_ss.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_ens_ss.yaml" # Invoke the command to generate a Bar plot based on # the custom_ens_ss.yaml custom config file. @@ -22,11 +24,10 @@ def cleanup(): # remove the .png and .points files # from any previous runs try: - path = os.getcwd() plot_file = 'ens_ss.png' points_file_1 = 'ens_ss.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) + os.remove(os.path.join(cwd, plot_file)) + os.remove(os.path.join(cwd, points_file_1)) except OSError as er: # Typically when files have already been removed or # don't exist. Ignore. @@ -34,7 +35,7 @@ def cleanup(): @pytest.mark.parametrize("test_input, expected", - (["./ens_ss.png", True],["./ens_ss.png", True],["./ens_ss.points1", True])) + ([f"{cwd}/ens_ss.png", True],[f"{cwd}/ens_ss.png", True],[f"{cwd}/ens_ss.points1", True])) def test_files_exist( setup, test_input, expected): """ Checking that the plot and data files are getting created @@ -49,23 +50,24 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./ens_ss_expected.png', './ens_ss.png') + comparison = CompareImages(f'{cwd}/ens_ss_expected.png', f'{cwd}/ens_ss.png') assert comparison.mssim == 1 cleanup() @pytest.mark.parametrize("test_input, expected", - (["./intermed_files/ens_ss.png", True], ["./intermed_files/ens_ss.points1", True])) + ([f"{cwd}/intermed_files/ens_ss.png", True], [f"{cwd}/intermed_files/ens_ss.points1", True])) def test_files_exist(test_input, expected): """ Checking that the plot and data files are getting created """ try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) + os.mkdir(os.path.join(cwd, 'intermed_files')) except FileExistsError as e: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom2_ens_ss.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom2_ens_ss.yaml" # Invoke the command to generate a Bar plot based on # the custom_ens_ss.yaml custom config file. 
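# Illustrative sketch (not part of this patch): the hunk above still exports
# METPLOTPY_BASE and TEST_DIR inline and keeps a module-level cleanup() helper,
# whereas other refactored modules in this diff (e.g. test_bar.py, test_box.py,
# test_equivalence_testing_bounds.py) rely on the shared setup_env and remove_files
# fixtures added in test/conftest.py. A minimal test module written in that style
# could look like the following; the plot and points file names reuse the ens_ss
# ones from this diff, and the wiring is illustrative rather than a drop-in
# replacement for test_ens_ss.py.
import os

import pytest

from metplotpy.plots.ens_ss import ens_ss

cwd = os.path.dirname(__file__)
CLEANUP_FILES = ['ens_ss.png', 'ens_ss.points1']


@pytest.fixture
def setup(remove_files, setup_env):
    # Remove leftovers from any previous run, point METPLOTPY_BASE and TEST_DIR at
    # this test directory, then generate the plot from the custom config file.
    remove_files(cwd, CLEANUP_FILES)
    setup_env(cwd)
    ens_ss.main(f"{cwd}/custom_ens_ss.yaml")


def test_files_exist(setup, remove_files):
    # The plot and the intermediate points file should be written next to the test,
    # since the YAML config resolves its paths through ${TEST_DIR}.
    for expected_file in CLEANUP_FILES:
        assert os.path.isfile(os.path.join(cwd, expected_file))
    remove_files(cwd, CLEANUP_FILES)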
@@ -73,8 +75,7 @@ def test_files_exist(test_input, expected): assert os.path.isfile(test_input) == expected cleanup() try: - path = os.getcwd() - subdir = os.path.join(path, "./intermed_files") + subdir = os.path.join(cwd, "intermed_files") plot_file = 'ens_ss.png' points_file_1 = 'ens_ss.points1' os.remove(os.path.join(subdir, plot_file)) diff --git a/test/equivalence_testing_bounds/custom_equivalence_testing_bounds.yaml b/test/equivalence_testing_bounds/custom_equivalence_testing_bounds.yaml index 03cb8f2d..ffec3e5d 100644 --- a/test/equivalence_testing_bounds/custom_equivalence_testing_bounds.yaml +++ b/test/equivalence_testing_bounds/custom_equivalence_testing_bounds.yaml @@ -163,8 +163,8 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -plot_filename: ./equivalence_testing_bounds.png -stat_input: ./equivalence_testing_bounds.data +plot_filename: !ENV '${TEST_DIR}/equivalence_testing_bounds.png' +stat_input: !ENV '${TEST_DIR}/equivalence_testing_bounds.data' show_legend: - 'True' - 'True' diff --git a/test/equivalence_testing_bounds/custom_equivalence_testing_bounds2.yaml b/test/equivalence_testing_bounds/custom_equivalence_testing_bounds2.yaml index 42681fc2..f087c6b8 100644 --- a/test/equivalence_testing_bounds/custom_equivalence_testing_bounds2.yaml +++ b/test/equivalence_testing_bounds/custom_equivalence_testing_bounds2.yaml @@ -83,7 +83,7 @@ plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null series_line_style: - '-' @@ -164,8 +164,8 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -plot_filename: ./intermed_files/equivalence_testing_bounds.png -stat_input: ./equivalence_testing_bounds.data +plot_filename: !ENV '${TEST_DIR}/intermed_files/equivalence_testing_bounds.png' +stat_input: !ENV '${TEST_DIR}/equivalence_testing_bounds.data' show_legend: - 'True' - 'True' diff --git a/test/equivalence_testing_bounds/test_equivalence_testing_bounds.py b/test/equivalence_testing_bounds/test_equivalence_testing_bounds.py index 3bfcd5e3..e7e849e6 100644 --- a/test/equivalence_testing_bounds/test_equivalence_testing_bounds.py +++ b/test/equivalence_testing_bounds/test_equivalence_testing_bounds.py @@ -3,92 +3,72 @@ from metplotpy.plots.equivalence_testing_bounds import equivalence_testing_bounds as etb #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['equivalence_testing_bounds.png', 'equivalence_testing_bounds.points1'] @pytest.fixture -def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_equivalence_testing_bounds.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_equivalence_testing_bounds.yaml" # Invoke the command to generate an equivalence testing boundary plot based on # the custom config file. 
etb.main(custom_config_filename) -def cleanup(): - # remove the line.png and .points1 file - # from any previous runs - try: - path = os.getcwd() - plot_file = 'equivalence_testing_bounds.png' - points_file_1 = 'equivalence_testing_bounds.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - - @pytest.mark.parametrize("test_input,expected", - (["./equivalence_testing_bounds.png", True], - ["./equivalence_testing_bounds.points1", True])) -def test_files_exist(setup, test_input, expected): + (["equivalence_testing_bounds.png", True], + ["equivalence_testing_bounds.points1", True])) +def test_files_exist(setup, test_input, expected, remove_files): ''' Checking that the plot and data files are getting created ''' - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("skimage differences causing failure") -def test_images_match(setup): +def test_images_match(setup, remove_files): ''' Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. ''' - path = os.getcwd() - plot_file = './equivalence_testing_bounds.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./equivalence_testing_bounds_expected.png', actual_file) + plot_file = 'equivalence_testing_bounds.png' + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/equivalence_testing_bounds_expected.png', actual_file) assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input,expected", - (["./intermed_files/equivalence_testing_bounds.png", True], - ["./intermed_files/equivalence_testing_bounds.points1", True])) -def test_files_exist(test_input, expected): + (["intermed_files/equivalence_testing_bounds.png", True], + ["intermed_files/equivalence_testing_bounds.points1", True])) +def test_files_exist(setup_env, test_input, expected, remove_files): ''' Checking that the plot and data files are getting created ''' - intermed_dir = os.path.join(os.getcwd(), 'intermed_files') + intermed_dir = os.path.join(cwd, 'intermed_files') try: os.mkdir(intermed_dir) - except FileExistsError as e: + except FileExistsError: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_equivalence_testing_bounds2.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_equivalence_testing_bounds2.yaml" # Invoke the command to generate an equivalence testing boundary plot based on # the custom config file. etb.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, ['intermed_files/equivalence_testing_bounds.png', + 'intermed_files/equivalence_testing_bounds.points1', + 'intermed_files/equivalence_testing_bounds.html']) try: - path = os.getcwd() - plot_file = 'equivalence_testing_bounds.png' - points_file_1 = 'equivalence_testing_bounds.points1' - subdir = os.path.join(path, 'intermed_files') - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) os.rmdir(intermed_dir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
+ except OSError: pass diff --git a/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml b/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml index 3e0d825b..748a2d8b 100644 --- a/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml +++ b/test/fv3_physics_tend/fv3_physics_tend_defaults.yaml @@ -2,47 +2,40 @@ # Each type of tendency (moisture, temperature, wind component) has its own set of variables. tendency_varnames: spfh: - - dq3dt_deepcnv - - dq3dt_mp - - dq3dt_pbl - - dq3dt_shalcnv - - dq3dt_nophys + - dtend_qv_pbl + - dtend_qv_deepcnv + - dtend_qv_shalcnv + - dtend_qv_mp + - dtend_qv_phys + - dtend_qv_nophys tmp: - - dt3dt_congwd - - dt3dt_deepcnv - - dt3dt_lw - - dt3dt_mp - - dt3dt_orogwd - - dt3dt_pbl - - dt3dt_rdamp - - dt3dt_shalcnv - - dt3dt_sw - - dt3dt_nophys + - dtend_temp_lw + - dtend_temp_sw + - dtend_temp_pbl + - dtend_temp_deepcnv + - dtend_temp_shalcnv + - dtend_temp_mp + - dtend_temp_orogwd + - dtend_temp_cnvgwd + - dtend_temp_phys + - dtend_temp_nophys ugrd: - - du3dt_congwd - - du3dt_deepcnv - - du3dt_mp - - du3dt_orogwd - - du3dt_pbl - - du3dt_rdamp - - du3dt_shalcnv - - du3dt_nophys + - dtend_u_pbl + - dtend_u_orogwd + - dtend_u_deepcnv + - dtend_u_cnvgwd + - dtend_u_shalcnv + - dtend_u_phys + - dtend_u_nophys vgrd: - - dv3dt_congwd - - dv3dt_deepcnv - - dv3dt_mp - - dv3dt_orogwd - - dv3dt_pbl - - dv3dt_rdamp - - dv3dt_shalcnv - - dv3dt_nophys + - dtend_v_pbl + - dtend_v_orogwd + - dtend_v_deepcnv + - dtend_v_cnvgwd + - dtend_v_shalcnv + - dtend_v_phys + - dtend_v_nophys -# Name of variables in history file that contain the temperature, moisture, wind at time zero (initialization time). -time0_varname: - tmp : tmp_i - spfh: qv_i - ugrd: ugrd_i - vgrd: vgrd_i # Name of the longitude and latitude variables in the grid specification file. 
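# Illustrative sketch (not part of this patch): the hunk above renames the FV3
# physics tendency variables from the old dq3dt/dt3dt/du3dt/dv3dt names to the
# dtend_* names and drops the time0_varname block. A quick way to inspect the
# resulting tendency_varnames mapping is to load the defaults file directly; this
# is generic PyYAML usage, not the fv3_physics_tend module's own config handling.
import yaml

with open("fv3_physics_tend_defaults.yaml", "r") as f:
    defaults = yaml.safe_load(f)

# For example, the temperature budget terms now include dtend_temp_cnvgwd and
# dtend_temp_phys in place of the old dt3dt_* names.
for varname in defaults["tendency_varnames"]["tmp"]:
    print(varname)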
@@ -65,5 +58,5 @@ standard_parallel : 38.139 cmap : "Spectral_r" # resolution (dots per inch) of output -dpi : 150 +dpi : 100 diff --git a/test/fv3_physics_tend/runner_planview.py b/test/fv3_physics_tend/runner_planview.py index 5d993406..ce05fb2a 100644 --- a/test/fv3_physics_tend/runner_planview.py +++ b/test/fv3_physics_tend/runner_planview.py @@ -26,7 +26,7 @@ def run_example(config_file): config = open_config(config_file) command_str = "python " + config['source_dir'] + "/planview_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp pbl -p 500 -t 1 -v 20190504T14 --nofineprint " + 'grid_file'] + " tmp pbl -p 500 -t 1 -v 20190615T20 --nofineprint " print("command string: ", command_str) os.system(command_str) @@ -37,7 +37,7 @@ def run_with_novel_output_file(config_file): config = open_config(config_file) command_str = "python " + config['source_dir'] + "/planview_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp pbl -p 500 -t 1 -v 20190504T14 --nofineprint -o ./test_planview.png" + 'grid_file'] + " tmp pbl -p 500 -t 1 -v 20190615T20 --nofineprint -o ./test_planview.png" print("command string: ", command_str) os.system(command_str) @@ -45,7 +45,7 @@ def run_with_novel_output_dir(config_file): '''Run the example in the user's guide specifying a non-existent output directory''' config = open_config(config_file) command_str = "python " + config['source_dir'] + "/planview_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ - config['history_file'] + " " + config['grid_file']+ " tmp pbl -p 500 -t 1 -v 20190504T14 --nofineprint \ + config['history_file'] + " " + config['grid_file']+ " tmp pbl -p 500 -t 1 -v 20190615T20 --nofineprint \ -o ./output/test_planview.png" print("command string: ", command_str) os.system(command_str) diff --git a/test/fv3_physics_tend/runner_vert_cross.py b/test/fv3_physics_tend/runner_vert_cross.py index ff3acb1a..9fcadf5e 100644 --- a/test/fv3_physics_tend/runner_vert_cross.py +++ b/test/fv3_physics_tend/runner_vert_cross.py @@ -24,7 +24,7 @@ def run_example(config_file): config = open_config(config_file) command_str = "python " + config['source_dir'] + "/cross_section_vert.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 2 -v 20190504T14 -s 32 -115 -e 34 -82 --nofineprint " + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s 32 -115 -e 34 -82 --nofineprint " print("command string: ", command_str) os.system(command_str) @@ -34,7 +34,7 @@ def run_with_novel_output_file(config_file): config = open_config(config_file) command_str = "python " + config['source_dir'] + "/cross_section_vert.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 2 -v 20190504T14 -s 32 -115 -e 34 -82 -o ./test_vert_cross.png --nofineprint " + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s 32 -115 -e 34 -82 -o ./test_vert_cross.png --nofineprint " print("command string: ", command_str) os.system(command_str) @@ -43,7 +43,7 @@ def run_with_novel_output_dir(config_file): config = open_config(config_file) command_str = "python " + config['source_dir'] + "/cross_section_vert.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 2 -v 20190504T14 -s 32 -115 -e 34 -82 -o ./output/test_vert_cross.png --nofineprint " + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s 32 -115 -e 34 -82 -o 
./output/test_vert_cross.png --nofineprint " print("command string: ", command_str) os.system(command_str) diff --git a/test/fv3_physics_tend/runner_vert_profile.py b/test/fv3_physics_tend/runner_vert_profile.py index f2b57eeb..eae282a8 100644 --- a/test/fv3_physics_tend/runner_vert_profile.py +++ b/test/fv3_physics_tend/runner_vert_profile.py @@ -26,7 +26,7 @@ def run_example(config_file): shapefiles_dir = str(tu.get_fv3_shapefiles_dir()) command_str = "python " + config['source_dir'] + "/vert_profile_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 1 -v 20190504T13 -s " + shapefiles_dir + " --nofineprint " + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s " + shapefiles_dir + " --nofineprint " print("command string: ", command_str) os.system(command_str) @@ -45,7 +45,7 @@ def run_example(config_file): shapefile_dir = tu.get_fv3_shapefiles_dir() command_str = "python " + config['source_dir'] + "/vert_profile_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 1 -v 20190504T13 -s " + shapefile_dir + " --nofineprint" + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s " + shapefile_dir + " --nofineprint" print("command string: ", command_str) os.system(command_str) @@ -66,7 +66,7 @@ def run_with_novel_output_file(config_file): shapefile_dir = tu.get_fv3_shapefiles_dir() command_str = "python " + config['source_dir'] + "/vert_profile_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 1 -v 20190504T13 -s " + shapefile_dir + " -o ./test_vert_profile.png --nofineprint" + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s " + shapefile_dir + " -o ./test_vert_profile.png --nofineprint" print("command string: ", command_str) os.system(command_str) @@ -76,7 +76,7 @@ def run_with_novel_output_dir(config_file): shapefile_dir = tu.get_fv3_shapefiles_dir() command_str = "python " + config['source_dir'] + "/vert_profile_fv3.py " + " ./fv3_physics_tend_defaults.yaml " + \ config['history_file'] + " " + config[ - 'grid_file'] + " tmp -t 2 -v 20190504T14 -s " + shapefile_dir + " -o ./output/test_vert_profile.png --nofineprint" + 'grid_file'] + " tmp -t 1 -v 20190615T20 -s " + shapefile_dir + " -o ./output/test_vert_profile.png --nofineprint" print("command string: ", command_str) os.system(command_str) diff --git a/test/histogram/prob_hist.yaml b/test/histogram/prob_hist.yaml index 0600576d..1b72e5c2 100644 --- a/test/histogram/prob_hist.yaml +++ b/test/histogram/prob_hist.yaml @@ -1,5 +1,5 @@ -stat_input: ./prob_hist.data -plot_filename: ./prob_hist.png +stat_input: !ENV '${TEST_DIR}/prob_hist.data' +plot_filename: !ENV '${TEST_DIR}/prob_hist.png' caption_align: 0.0 caption_col: '#333333' diff --git a/test/histogram/rank_hist.yaml b/test/histogram/rank_hist.yaml index 40f12bdd..8cf23ec9 100644 --- a/test/histogram/rank_hist.yaml +++ b/test/histogram/rank_hist.yaml @@ -1,5 +1,5 @@ -stat_input: ./rank_hist.data -plot_filename: ./rank_hist.png +stat_input: !ENV '${TEST_DIR}/rank_hist.data' +plot_filename: !ENV '${TEST_DIR}/rank_hist.png' caption_align: 0.0 caption_col: '#333333' diff --git a/test/histogram/rel_hist.yaml b/test/histogram/rel_hist.yaml index 322e83f0..ebb76e03 100644 --- a/test/histogram/rel_hist.yaml +++ b/test/histogram/rel_hist.yaml @@ -1,5 +1,5 @@ -plot_filename: ./rel_hist.png -stat_input: ./rel_hist.data +plot_filename: !ENV '${TEST_DIR}/rel_hist.png' +stat_input: !ENV '${TEST_DIR}/rel_hist.data' 
caption_align: 0.0 diff --git a/test/histogram/test_prob_hist.py b/test/histogram/test_prob_hist.py index 4d41f0d7..fc9b3ab4 100644 --- a/test/histogram/test_prob_hist.py +++ b/test/histogram/test_prob_hist.py @@ -3,15 +3,15 @@ from metplotpy.plots.histogram import prob_hist #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) + @pytest.fixture -def setup(): - # Cleanup the plotfile output file from any previous run +def setup(setup_env): cleanup() + setup_env(cwd) - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "prob_hist.yaml" - + custom_config_filename = f"{cwd}/prob_hist.yaml" prob_hist.main(custom_config_filename) @@ -19,22 +19,20 @@ def cleanup(): # remove the rel_hist.png # from any previous runs try: - path = os.getcwd() - plot_file = './prob_hist.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. + plot_file = 'prob_hist.png' + os.remove(os.path.join(cwd, plot_file)) + except OSError: pass + @pytest.mark.parametrize("test_input, expected", - (["./prob_hist_expected.png", True], - ["./prob_hist.png", True])) + (["prob_hist_expected.png", True], + ["prob_hist.png", True])) def test_files_exist(setup, test_input, expected): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected cleanup() @pytest.mark.skip("Image comparisons fail during Github Actions checks.") @@ -44,7 +42,7 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages("./prob_hist_expected.png", - "./prob_hist.png") + comparison = CompareImages(f"{cwd}/prob_hist_expected.png", + f"{cwd}/prob_hist.png") assert comparison.mssim == 1 cleanup() diff --git a/test/histogram/test_rank_hist.py b/test/histogram/test_rank_hist.py index e2ba10e3..4b9d5b08 100644 --- a/test/histogram/test_rank_hist.py +++ b/test/histogram/test_rank_hist.py @@ -3,16 +3,15 @@ from metplotpy.plots.histogram import rank_hist #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) + @pytest.fixture -def setup(): - # Cleanup the plotfile output file from any previous run +def setup(setup_env): cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "rank_hist.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/rank_hist.yaml" rank_hist.main(custom_config_filename) @@ -20,24 +19,23 @@ def cleanup(): # remove the rel_hist.png # from any previous runs try: - path = os.getcwd() - plot_file = './rank_hist.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
+ plot_file = 'rank_hist.png' + os.remove(os.path.join(cwd, plot_file)) + except OSError: pass + @pytest.mark.parametrize("test_input, expected", - (["./rank_hist_expected.png", True], - ["./rank_hist.png", True])) + (["rank_hist_expected.png", True], + ["rank_hist.png", True])) def test_files_exist(setup, test_input, expected): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected cleanup() + @pytest.mark.skip("Image comparisons fail during Github Actions checks.") def test_images_match(setup): """ @@ -45,7 +43,7 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages("./rank_hist_expected.png", - "./rank_hist.png") + comparison = CompareImages(f"{cwd}/rank_hist_expected.png", + f"{cwd}/rank_hist.png") assert comparison.mssim == 1 cleanup() diff --git a/test/histogram/test_rel_hist.py b/test/histogram/test_rel_hist.py index 2da38dec..a491283c 100644 --- a/test/histogram/test_rel_hist.py +++ b/test/histogram/test_rel_hist.py @@ -3,13 +3,15 @@ from metplotpy.plots.histogram import rel_hist #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) + @pytest.fixture -def setup(): +def setup(setup_env): # Cleanup the plotfile output file from any previous run cleanup() - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "rel_hist.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/rel_hist.yaml" # Invoke the command to generate a histogram based on # the rel_hist.yaml custom config file. @@ -20,24 +22,23 @@ def cleanup(): # remove the rel_hist.png # from any previous runs try: - path = os.getcwd() - plot_file = './rel_hist.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. + plot_file = 'rel_hist.png' + os.remove(os.path.join(cwd, plot_file)) + except OSError: pass + @pytest.mark.parametrize("test_input, expected", - (["./rel_hist_expected.png", True], - ["./rel_hist.png", True])) + (["rel_hist_expected.png", True], + ["rel_hist.png", True])) def test_files_exist(setup, test_input, expected): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected cleanup() + @pytest.mark.skip("Image comparisons fail in Github Actions checks.") def test_images_match(setup): """ @@ -45,7 +46,7 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. 
""" - comparison = CompareImages("./rel_hist_expected.png", - "./rel_hist.png") + comparison = CompareImages(f"{cwd}/rel_hist_expected.png", + f"{cwd}/rel_hist.png") assert comparison.mssim == 1 cleanup() diff --git a/test/histogram_2d/custom_histogram_2d.yaml b/test/histogram_2d/custom_histogram_2d.yaml index 1cc13895..292349c2 100644 --- a/test/histogram_2d/custom_histogram_2d.yaml +++ b/test/histogram_2d/custom_histogram_2d.yaml @@ -1,5 +1,5 @@ # replace with the location and name of your choosing: -plot_filename: ./custom_tmp_z2_p500.png +plot_filename: !ENV '${TEST_DIR}/custom_tmp_z2_p500.png' height: 800 width: 1200 @@ -26,7 +26,7 @@ pdf_max: 4.0 # dump_points_2: 'False' # the input data file (replace ./ with the full path) -stat_input: ./grid_diag_temperature.nc +stat_input: !ENV '${TEST_DIR}/grid_diag_temperature.nc' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. diff --git a/test/histogram_2d/minimal_histogram_2d.yaml b/test/histogram_2d/minimal_histogram_2d.yaml index cfb8c80c..291feb3b 100644 --- a/test/histogram_2d/minimal_histogram_2d.yaml +++ b/test/histogram_2d/minimal_histogram_2d.yaml @@ -1,5 +1,5 @@ # minimal custom config file. Use all default settings in the histogram_2d_defaults.yaml # configuration file -stat_input: ./grid_diag_temperature.nc -plot_filename: ./tmp_z2_p500.png +stat_input: !ENV '${TEST_DIR}/grid_diag_temperature.nc' +plot_filename: !ENV '${TEST_DIR}/tmp_z2_p500.png' diff --git a/test/histogram_2d/test_histogram_2d.py b/test/histogram_2d/test_histogram_2d.py index 4e6e9e95..1bc96a04 100644 --- a/test/histogram_2d/test_histogram_2d.py +++ b/test/histogram_2d/test_histogram_2d.py @@ -1,20 +1,19 @@ import os -import sys import pytest from metplotpy.plots.histogram_2d import histogram_2d as h2d - # from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) + + @pytest.fixture -def setup(): +def setup(setup_env): # Cleanup the plotfile and point1 output file from any previous run # cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./minimal_histogram_2d.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/minimal_histogram_2d.yaml" # print("\n current directory: ", os.getcwd()) # print("\ncustom config file: ", custom_config_filename, '\n') @@ -29,26 +28,11 @@ def setup(): # change the stat_input and plot_filename to explicitly point to this directory before # running the test below because xarray cannot handle relative paths when reading in # filenames -def test_plot_exists(setup): +def test_plot_exists(setup, remove_files): ''' Checking that only the "defaults" plot file is getting created ''' test_input = "tmp_z2_p500.png" - from os.path import exists - file_exists = exists(test_input) - if file_exists: - assert os.path.exists(test_input) - else: - print("File doesn't exist") - assert False - - # cleanup - try: - path = os.getcwd() - plot_file = 'tmp_z2_p500.png' - # os.remove(os.path.join(path, plot_file)) - except OSError: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass + assert os.path.exists(f"{cwd}/{test_input}") + remove_files(cwd, ['tmp_z2_p500.png']) diff --git a/test/line/custom_line.yaml b/test/line/custom_line.yaml index b189b8ff..e38a6d85 100644 --- a/test/line/custom_line.yaml +++ b/test/line/custom_line.yaml @@ -94,7 +94,7 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./line.png +plot_filename: !ENV '${TEST_DIR}/line.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -153,7 +153,7 @@ show_signif: - 'False' - 'False' - 'False' -stat_input: ./line.data +stat_input: !ENV '${TEST_DIR}/line.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/custom_line2.yaml b/test/line/custom_line2.yaml index c827af9b..cafc9708 100644 --- a/test/line/custom_line2.yaml +++ b/test/line/custom_line2.yaml @@ -95,14 +95,14 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./line.png +plot_filename: !ENV '${TEST_DIR}/line.png' plot_height: 8.5 plot_res: 72 plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null series_line_style: - '-' @@ -149,7 +149,7 @@ show_signif: - 'False' - 'False' - 'False' -stat_input: ./line.data +stat_input: !ENV '${TEST_DIR}/line.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/custom_line_from_zero.yaml b/test/line/custom_line_from_zero.yaml index cda69849..9d320c8c 100644 --- a/test/line/custom_line_from_zero.yaml +++ b/test/line/custom_line_from_zero.yaml @@ -95,7 +95,7 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./line_from_zero.png +plot_filename: !ENV '${TEST_DIR}/line_from_zero.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -155,7 +155,7 @@ show_signif: - 'False' - 'False' start_from_zero: 'True' -stat_input: ./line.data +stat_input: !ENV '${TEST_DIR}/line.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/custom_line_groups.yaml b/test/line/custom_line_groups.yaml index 6985141a..0eafc444 100644 --- a/test/line/custom_line_groups.yaml +++ b/test/line/custom_line_groups.yaml @@ -87,7 +87,7 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./line_groups.png +plot_filename: !ENV '${TEST_DIR}/line_groups.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -125,7 +125,7 @@ show_signif: - 'False' - 'False' - 'False' -stat_input: ./line_groups.data +stat_input: !ENV '${TEST_DIR}/line_groups.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/custom_line_groups2.yaml b/test/line/custom_line_groups2.yaml index ff6e6b15..0eedd0cf 100644 --- a/test/line/custom_line_groups2.yaml +++ b/test/line/custom_line_groups2.yaml @@ -92,14 +92,14 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./intermed_files/line_groups.png +plot_filename: !ENV '${TEST_DIR}/intermed_files/line_groups.png' plot_height: 8.5 plot_res: 72 plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null series_line_style: - '-' @@ -131,7 +131,7 @@ show_signif: - 'False' - 'False' - 'False' -stat_input: ./line_groups.data +stat_input: !ENV '${TEST_DIR}/line_groups.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/fbias_fixed_vars_vals.yaml b/test/line/fbias_fixed_vars_vals.yaml index 211f7389..75005942 100644 --- a/test/line/fbias_fixed_vars_vals.yaml +++ b/test/line/fbias_fixed_vars_vals.yaml @@ -166,7 +166,7 @@ plot_ci: plot_disp: - 
'True' - 'True' -plot_filename: ./fbias_fixed_vars.png +plot_filename: !ENV '${TEST_DIR}/fbias_fixed_vars.png' plot_height: 8.5 plot_res: 72 plot_stat: mean @@ -180,7 +180,7 @@ start_from_zero: True # that is used by METviewer (created when dump_points_1 is set to True) # if dump_points_1 is True and this is uncommented, the points1 file # will be saved in the default location (i.e. where the input data file is stored). -points_path: ./ +points_path: !ENV '${TEST_DIR}/' random_seed: null series_line_style: @@ -207,7 +207,7 @@ show_nstats: 'False' show_signif: - 'False' - 'False' -stat_input: ./fbias_data.txt +stat_input: !ENV '${TEST_DIR}/fbias_data.txt' sync_yaxes: 'False' title: "Fixed variable fcst_thresh >0.0 for FBIAS" title_align: 0.5 diff --git a/test/line/mv_custom_vert_line.yaml b/test/line/mv_custom_vert_line.yaml index 1a9bc292..d26335f0 100644 --- a/test/line/mv_custom_vert_line.yaml +++ b/test/line/mv_custom_vert_line.yaml @@ -102,7 +102,7 @@ plot_ci: plot_disp: - 'True' - 'True' -plot_filename: ./vert_line_plot.png +plot_filename: !ENV '${TEST_DIR}/vert_line_plot.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -114,7 +114,7 @@ plot_width: 11.0 # that is used by METviewer (created when dump_points_1 is set to True) # if dump_points_1 is True and this is uncommented, the points1 file # will be saved in the default location (i.e. where the input data file is stored). -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null series_line_style: - '-' @@ -140,7 +140,7 @@ show_nstats: 'True' show_signif: - 'True' - 'True' -stat_input: ./vert_line_plot_data.txt +stat_input: !ENV '${TEST_DIR}/vert_line_plot_data.txt' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/line/test_line_groups_plot.py b/test/line/test_line_groups_plot.py index e4133e54..a0756c03 100644 --- a/test/line/test_line_groups_plot.py +++ b/test/line/test_line_groups_plot.py @@ -3,6 +3,7 @@ from metplotpy.plots.line import line as l #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) @pytest.fixture def setup(): @@ -10,8 +11,9 @@ def setup(): cleanup() # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_line_groups.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line_groups.yaml" # Invoke the command to generate a Performance Diagram based on # the test_custom_performance_diagram.yaml custom config file. @@ -22,15 +24,14 @@ def cleanup(): # remove the line.png and .points files # from any previous runs try: - path = os.getcwd() plot_file = 'line_groups.png' points_file_1 = 'line_groups.points1' points_file_2 = 'line_groups.points2' html_file = 'line_groups.html' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - os.remove(os.path.join(path, points_file_2)) - os.remove(os.path.join(path, html_file)) + os.remove(os.path.join(cwd, plot_file)) + os.remove(os.path.join(cwd, points_file_1)) + os.remove(os.path.join(cwd, points_file_2)) + os.remove(os.path.join(cwd, html_file)) except OSError as e: # Typically when files have already been removed or # don't exist. Ignore. 
@@ -38,7 +39,7 @@ def cleanup(): @pytest.mark.parametrize("test_input,expected", - (["./line_groups.png", True], ["./line_groups.points1", True])) + ([f"{cwd}/line_groups.png", True], [f"{cwd}/line_groups.points1", True])) def test_files_exist(setup, test_input, expected): ''' Checking that the plot and data files are getting created @@ -53,27 +54,27 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. ''' - path = os.getcwd() plot_file = 'line_groups.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./line_groups_expected.png', actual_file) + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/line_groups_expected.png', actual_file) assert comparison.mssim == 1 cleanup() @pytest.mark.parametrize("test_input,expected", - (["./intermed_files/line_groups.png", True], ["./intermed_files/line_groups.points1", True])) + ([f"{cwd}/intermed_files/line_groups.png", True], [f"{cwd}/intermed_files/line_groups.points1", True])) def test_files_exist( test_input, expected): ''' Checking that the plot and data files are getting created ''' - intermed_dir = './intermed_files' + intermed_dir = f'{cwd}/intermed_files' try: os.mkdir(intermed_dir) except FileExistsError as e: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_line_groups2.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line_groups2.yaml" # Invoke the command to generate a Performance Diagram based on # the test_custom_performance_diagram.yaml custom config file. @@ -82,8 +83,7 @@ def test_files_exist( test_input, expected): # remove the files that were created, cleanup() isn't applicable for this test. try: - path = os.getcwd() - subdir = os.path.join(path, intermed_dir) + subdir = os.path.join(cwd, intermed_dir) plot_file = 'line_groups.png' points_file_1 = 'line_groups.points1' points_file_2 = 'line_groups.points2' diff --git a/test/line/test_line_plot.py b/test/line/test_line_plot.py index 203de672..bb4972dc 100644 --- a/test/line/test_line_plot.py +++ b/test/line/test_line_plot.py @@ -3,6 +3,7 @@ import os from metplotpy.plots.line import line as l +cwd = os.path.dirname(__file__) # from metcalcpy.compare_images import CompareImages @@ -13,8 +14,9 @@ def setup(): cleanup() # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_line.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line.yaml" # Invoke the command to generate a line plot based on # the custom config file. @@ -25,13 +27,12 @@ def cleanup(): # remove the line.png and .points files # from any previous runs try: - path = os.getcwd() plot_file = 'line.png' points_file_1 = 'line.points1' points_file_2 = 'line.points2' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - os.remove(os.path.join(path, points_file_2)) + os.remove(os.path.join(cwd, plot_file)) + os.remove(os.path.join(cwd, points_file_1)) + os.remove(os.path.join(cwd, points_file_2)) except OSError as e: # Typically when files have already been removed or # don't exist. Ignore. 
@@ -39,8 +40,8 @@ def cleanup(): @pytest.mark.parametrize("test_input,expected", - (["./line.png", True], ["./line.points1", False], - ["./line.points2", False])) + ([f"{cwd}/line.png", True], [f"{cwd}/line.points1", False], + [f"{cwd}/line.points2", False])) def test_files_exist(setup, test_input, expected): ''' Checking that the plot file is getting created but the @@ -51,8 +52,8 @@ def test_files_exist(setup, test_input, expected): @pytest.mark.parametrize("test_input,expected", - (["./line.png", True], ["./intermed_files/line.points1", True], - ["./intermed_files/line.points2", True])) + ([f"{cwd}/line.png", True], [f"{cwd}/intermed_files/line.points1", True], + [f"{cwd}/intermed_files/line.points2", True])) def test_points_files_exist(test_input, expected): ''' Checking that the plot and point data files are getting created @@ -60,12 +61,13 @@ def test_points_files_exist(test_input, expected): # create the intermediate directory to store the .points1 and .points2 files try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) + os.mkdir(os.path.join(cwd, 'intermed_files')) except FileExistsError as e: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_line2.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line2.yaml" l.main(custom_config_filename) # Test for expected values @@ -73,12 +75,11 @@ def test_points_files_exist(test_input, expected): # cleanup intermediate files and plot try: - path = os.getcwd() plot_file = 'line.png' points_file_1 = 'line.points1' points_file_2 = 'line.points2' - intermed_path = os.path.join(path, 'intermed_files') - os.remove(os.path.join(path, plot_file)) + intermed_path = os.path.join(cwd, 'intermed_files') + os.remove(os.path.join(cwd, plot_file)) os.remove(os.path.join(intermed_path, points_file_1)) os.remove(os.path.join(intermed_path, points_file_2)) except OSError as e: @@ -94,19 +95,20 @@ def test_no_nans_in_points_files(): # create the intermediate directory to store the .points1 and .points2 files try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) + os.mkdir(os.path.join(cwd, 'intermed_files')) except FileExistsError as e: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_line2.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line2.yaml" l.main(custom_config_filename) # Check for NaN's in the intermediate files, line.points1 and line.points2 # Fail if there are any NaN's-this indicates something went wrong with the # line_series.py module's _create_series_points() method. 
nans_found = False - with open("./intermed_files/line.points1", "r") as f: + with open(f"{cwd}/intermed_files/line.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True @@ -114,7 +116,7 @@ def test_no_nans_in_points_files(): assert nans_found == False # Now check line.points2 - with open("./intermed_files/line.points2", "r") as f: + with open(f"{cwd}/intermed_files/line.points2", "r") as f: data = f.read() if "NaN" in data: nans_found = True @@ -122,7 +124,7 @@ def test_no_nans_in_points_files(): assert nans_found == False # Verify that the nan.points1 file does indeed trigger a "nans_found" - with open("./nan.points1", "r") as f: + with open(f"{cwd}/nan.points1", "r") as f: data = f.read() if "NaN" in data: nans_found = True @@ -132,12 +134,11 @@ def test_no_nans_in_points_files(): # cleanup intermediate files and plot try: - path = os.getcwd() plot_file = 'line.png' points_file_1 = 'line.points1' points_file_2 = 'line.points2' - intermed_path = os.path.join(path, 'intermed_files') - os.remove(os.path.join(path, plot_file)) + intermed_path = os.path.join(cwd, 'intermed_files') + os.remove(os.path.join(cwd, plot_file)) os.remove(os.path.join(intermed_path, points_file_1)) os.remove(os.path.join(intermed_path, points_file_2)) except OSError as e: @@ -153,10 +154,9 @@ def test_images_match(setup): newly created plot to verify that the plot hasn't changed in appearance. ''' - path = os.getcwd() plot_file = './line.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./line_expected.png', actual_file) + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/line_expected.png', actual_file) # !!!WARNING!!! SOMETIMES FILE SIZES DIFFER IN SPITE OF THE PLOTS LOOKING THE SAME # THIS TEST IS NOT 100% RELIABLE because of differences in machines, OS, etc. @@ -174,16 +174,16 @@ def test_new_images_match(): # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_line_from_zero.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/custom_line_from_zero.yaml" # Invoke the command to generate a Performance Diagram based on # the test_custom_performance_diagram.yaml custom config file. l.main(custom_config_filename) - path = os.getcwd() plot_file = 'line_from_zero.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./line_expected_from_zero.png', actual_file) + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/line_expected_from_zero.png', actual_file) # !!!WARNING!!! SOMETIMES FILE SIZES DIFFER IN SPITE OF THE PLOTS LOOKING THE SAME # THIS TEST IS NOT 100% RELIABLE because of differences in machines, OS, etc. @@ -191,9 +191,8 @@ def test_new_images_match(): # cleanup plot try: - path = os.getcwd() plot_file = 'line_from_zero.png' - os.remove(os.path.join(path, plot_file)) + os.remove(os.path.join(cwd, plot_file)) except OSError as e: # Typically when files have already been removed or # don't exist. Ignore. 
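The image-match tests assert comparison.mssim == 1, i.e. a perfect structural-similarity score between the expected plot and the newly generated one. CompareImages comes from metcalcpy and is not shown in this diff; a rough sketch of that kind of check (assuming Pillow and scikit-image are available; the real CompareImages may preprocess and score differently) is:

import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity

def mssim(expected_path: str, actual_path: str) -> float:
    # Convert both plots to grayscale arrays of the same shape, then score them.
    expected = np.asarray(Image.open(expected_path).convert('L'), dtype=float)
    actual = np.asarray(Image.open(actual_path).convert('L'), dtype=float)
    return structural_similarity(expected, actual, data_range=255)

A score of exactly 1.0 requires pixel-for-pixel structural agreement, which is why these comparisons are flagged as unreliable across machines and operating systems and are skipped in several of the test modules.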
@@ -215,21 +214,21 @@ def test_vertical_plot():
     except FileExistsError as e:
         pass
-    os.environ['METPLOTPY_BASE'] = "../../"
-    custom_config_filename = "mv_custom_vert_line.yaml"
+    os.environ['METPLOTPY_BASE'] = f"{cwd}/../../"
+    os.environ['TEST_DIR'] = cwd
+    custom_config_filename = f"{cwd}/mv_custom_vert_line.yaml"
     l.main(custom_config_filename)
     try:
-        path = os.getcwd()
         plot_file = 'vert_line_plot.png'
         points_file_1 = 'vert_line_plot.points1'
         points_file_2 = 'vert_line_plot.points2'
-        intermed_path = os.path.join(path, 'intermed_files')
+        intermed_path = os.path.join(cwd, 'intermed_files')
         # Retrieve the .points1 files generated by METviewer and METplotpy respectively
-        mv_df = pd.read_csv('./intermed_files/vert_plot_y1_from_metviewer.points1',
+        mv_df = pd.read_csv(f'{cwd}/intermed_files/vert_plot_y1_from_metviewer.points1',
                             sep=" ", header=None)
-        mpp_df = pd.read_csv('./intermed_files/vert_line_plot.points1', sep=" ",
+        mpp_df = pd.read_csv(f'{cwd}/intermed_files/vert_line_plot.points1', sep=" ",
                              header=None)
         # -----------------------
@@ -259,7 +258,7 @@ def test_vertical_plot():
-        os.remove(os.path.join(path, plot_file))
+        os.remove(os.path.join(cwd, plot_file))
         os.remove(os.path.join(intermed_path, points_file_1))
         os.remove(os.path.join(intermed_path, points_file_2))
-        os.remove('./intermed_files/vert_plot_y1_from_metviewer.points1')
+        os.remove(f'{cwd}/intermed_files/vert_plot_y1_from_metviewer.points1')
     except OSError as e:
         # Typically when files have already been removed or
         # don't exist. Ignore.
@@ -274,25 +273,25 @@ def test_fixed_var_val():
     """
     # Set up the METPLOTPY_BASE so that met_plot.py will correctly find
     # the config directory containing all the default config files.
-    os.environ['METPLOTPY_BASE'] = "../../"
-    custom_config_filename = "fbias_fixed_vars_vals.yaml"
+    os.environ['METPLOTPY_BASE'] = f"{cwd}/../../"
+    os.environ['TEST_DIR'] = cwd
+    custom_config_filename = f"{cwd}/fbias_fixed_vars_vals.yaml"
     # Invoke the command to generate a line plot based on
     # the custom config file.
     l.main(custom_config_filename)
-    expected_points = "../intermed_files/mv_fixed_var_vals.points1"
+    expected_points = f"{cwd}/intermed_files/mv_fixed_var_vals.points1"
     try:
-        path = os.getcwd()
         plot_file = 'fbias_fixed_vars_reformatted_input.png'
-        intermed_path = os.path.join(path, 'intermed_files')
+        intermed_path = os.path.join(cwd, 'intermed_files')
         # Retrieve the .points1 files generated by METviewer and METplotpy respectively
-        mv_df = pd.read_csv('./intermed_files/mv_fixed_var_vals.points1',
+        mv_df = pd.read_csv(f'{cwd}/intermed_files/mv_fixed_var_vals.points1',
                             sep="\t", header=None)
-        mpp_df = pd.read_csv('./fbias.points1', sep="\t", header=None)
+        mpp_df = pd.read_csv(f'{cwd}/fbias.points1', sep="\t", header=None)
         # Verify that the values in the generated points1 file are identical
         # to those in the METviewer points1 file.
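The test modules that follow (test_mpr_plot.py, test_performance_diagram.py, test_reliability_diagram.py, test_revision_box.py, test_revision_series.py, and test_roc_diagram.py) replace this per-test environment setup and file cleanup with shared setup_env and remove_files pytest fixtures. Their definitions are not part of this section; based only on how they are called, setup_env(cwd) and remove_files(cwd, CLEANUP_FILES), a minimal conftest.py sketch might look like the following (hypothetical; the repository's actual fixtures may differ):

import os
import pytest

@pytest.fixture
def setup_env():
    def _setup(test_dir):
        # Point METPLOTPY_BASE at the repository root and TEST_DIR at the test
        # module's directory so the !ENV paths in the YAML configs resolve.
        os.environ['METPLOTPY_BASE'] = f"{test_dir}/../../"
        os.environ['TEST_DIR'] = test_dir
    return _setup

@pytest.fixture
def remove_files():
    def _remove(test_dir, filenames):
        # Best-effort removal of plots and .points files left over from earlier runs.
        for name in filenames:
            try:
                os.remove(os.path.join(test_dir, name))
            except OSError:
                pass
    return _remove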
@@ -311,12 +310,12 @@ def test_fixed_var_val(): assert mv_df.iloc[i][0] == mpp_df.iloc[i][0] # Clean up the fbias.points1, fbias.points2, and .png files - os.remove('fbias.points1') - os.remove('fbias.points2') - os.remove('fbias_fixed_vars_reformatted_input.png') - os.remove('./intermed_files/mv_fixed_var_vals.points1') + os.remove(f'{cwd}/fbias.points1') + os.remove(f'{cwd}/fbias.points2') + os.remove(f'{cwd}/fbias_fixed_vars_reformatted_input.png') + os.remove(f'{cwd}/intermed_files/mv_fixed_var_vals.points1') os.remove(expected_points) - os.rmdir('./intermed_files') + os.rmdir(f'{cwd}/intermed_files') except OSError as e: # Typically when files have already been removed or @@ -335,20 +334,19 @@ def test_fixed_var_val_image_compare(): # Set up the METPLOTPY_BASE so that met_plot.py will correctly find # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "fbias_fixed_vars_vals.yaml" + os.environ['METPLOTPY_BASE'] = f"{cwd}/../../" + os.environ['TEST_DIR'] = cwd + custom_config_filename = f"{cwd}/fbias_fixed_vars_vals.yaml" # Invoke the command to generate a line plot based on # the custom config file. l.main(custom_config_filename) - expected_plot = "./expected_fbias_fixed_vars.png" + expected_plot = f"{cwd}/expected_fbias_fixed_vars.png" try: - path = os.getcwd() plot_file = 'fbias_fixed_vars.png' - - created_file = os.path.join(path, plot_file) + created_file = os.path.join(cwd, plot_file) # first verify that the output plot was created if os.path.exists(created_file): @@ -364,8 +362,8 @@ def test_fixed_var_val_image_compare(): assert comparison.mssim == 1 # Clean up the fbias.points1, fbias.points2 and the png files - os.remove('fbias.points1') - os.remove('fbias.points2') + os.remove(f'{cwd}/fbias.points1') + os.remove(f'{cwd}/fbias.points2') os.remove(created_file) except OSError as e: diff --git a/test/mpr_plot/mpr_plot_custom.yaml b/test/mpr_plot/mpr_plot_custom.yaml index 7c20cf56..aad4b441 100644 --- a/test/mpr_plot/mpr_plot_custom.yaml +++ b/test/mpr_plot/mpr_plot_custom.yaml @@ -1,5 +1,5 @@ wind_rose: True -plot_filename: mpr_plots.png +plot_filename: !ENV '${TEST_DIR}/mpr_plots.png' wind_rose_breaks: - 0.0 - 1.0 @@ -28,7 +28,7 @@ angularaxis_ticktext: - 'S' - 'W' mpr_file_list: - - ./point_stat_mpr.txt + - !ENV '${TEST_DIR}/point_stat_mpr.txt' width: 1200 height: 7500 marker_color: 'rgb(194,189,251)' diff --git a/test/mpr_plot/test_mpr_plot.py b/test/mpr_plot/test_mpr_plot.py index df5bf1ee..ee724505 100644 --- a/test/mpr_plot/test_mpr_plot.py +++ b/test/mpr_plot/test_mpr_plot.py @@ -3,50 +3,39 @@ from metplotpy.plots.mpr_plot import mpr_plot #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['mpr_plots.png'] + @pytest.fixture -def setup(): +def setup(setup_env, remove_files): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "mpr_plot_custom.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/mpr_plot_custom.yaml" # Invoke the command to generate a Performance Diagram based on # the custom_performance_diagram.yaml custom config file. 
mpr_plot.main(custom_config_filename) -def cleanup(): - # remove the line.png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'mpr_plots.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - - @pytest.mark.parametrize("test_input, expected", - (["./mpr_plots.png", True],["./mpr_plots_expected.png", True])) -def test_files_exist(setup, test_input, expected): + (["mpr_plots.png", True], ["mpr_plots_expected.png", True])) +def test_files_exist(setup, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("unreliable-sometimes fails due to differences between machines.") -def test_images_match(setup): +def test_images_match(setup, remove_files): """ Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./mpr_plots_expected.png', './mpr_plots.png') + comparison = CompareImages(f'{cwd}/mpr_plots_expected.png', f'{cwd}/mpr_plots.png') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) diff --git a/test/performance_diagram/custom_performance_diagram.yaml b/test/performance_diagram/custom_performance_diagram.yaml index 03bb45fd..6527cb1e 100644 --- a/test/performance_diagram/custom_performance_diagram.yaml +++ b/test/performance_diagram/custom_performance_diagram.yaml @@ -104,8 +104,8 @@ series_line_style: - "--" # dotted line - ":" -stat_input: ./plot_20200317_151252.data -plot_filename: ./performance_diagram_actual.png +stat_input: !ENV '${TEST_DIR}/plot_20200317_151252.data' +plot_filename: !ENV '${TEST_DIR}/performance_diagram_actual.png' event_equal: False # To save your log output to a file, specify a path and filename and uncomment the line below. 
Make sure you have diff --git a/test/performance_diagram/custom_performance_diagram_defaultpoints1.yaml b/test/performance_diagram/custom_performance_diagram_defaultpoints1.yaml index 2aaa321d..d6e5480f 100644 --- a/test/performance_diagram/custom_performance_diagram_defaultpoints1.yaml +++ b/test/performance_diagram/custom_performance_diagram_defaultpoints1.yaml @@ -102,6 +102,6 @@ series_line_style: - "--" # dotted line - ":" -stat_input: ./plot_20200317_151252.data -plot_filename: ./performance_diagram_defaultpoints1.png +stat_input: !ENV '${TEST_DIR}/plot_20200317_151252.data' +plot_filename: !ENV '${TEST_DIR}/performance_diagram_defaultpoints1.png' event_equalize: False diff --git a/test/performance_diagram/custom_performance_diagram_points1.yaml b/test/performance_diagram/custom_performance_diagram_points1.yaml index 67a62c84..aaf20796 100644 --- a/test/performance_diagram/custom_performance_diagram_points1.yaml +++ b/test/performance_diagram/custom_performance_diagram_points1.yaml @@ -5,7 +5,7 @@ title: Performance Diagram ("Custom title") xaxis: Success Ratio dump_points_1: 'True' -points_path: './intermed_files' +points_path: !ENV '${TEST_DIR}/intermed_files' # support two y-axes yaxis_1: Probability of Detection (PODY) @@ -99,6 +99,6 @@ series_line_style: - "--" # dotted line - ":" -stat_input: ./plot_20200317_151252.data -plot_filename: ./performance_diagram_actual_points1.png +stat_input: !ENV '${TEST_DIR}/plot_20200317_151252.data' +plot_filename: !ENV '${TEST_DIR}/performance_diagram_actual_points1.png' event_equalize: False diff --git a/test/performance_diagram/test_performance_diagram.py b/test/performance_diagram/test_performance_diagram.py index 47a8961b..511e8a8d 100644 --- a/test/performance_diagram/test_performance_diagram.py +++ b/test/performance_diagram/test_performance_diagram.py @@ -4,14 +4,17 @@ from metplotpy.plots.performance_diagram import performance_diagram as pd #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) + +CLEANUP_FILES = ['performance_diagram_actual.png', 'plot_20200317_151252.points1'] + + @pytest.fixture -def setup(): +def setup(setup_env, remove_files): + setup_env(cwd) # Cleanup the plotfile and point1 output file from any previous run - # cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_performance_diagram.yaml" + remove_files(cwd, CLEANUP_FILES) + custom_config_filename = f"{cwd}/custom_performance_diagram.yaml" # print("\n current directory: ", os.getcwd()) # print("\ncustom config file: ", custom_config_filename, '\n') @@ -22,43 +25,27 @@ def setup(): pd.main(custom_config_filename) -def cleanup(): - # remove the performance_diagram_expected.png and plot_20200317_151252.points1 files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'performance_diagram_actual.png' - points_file = 'plot_20200317_151252.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - - -@pytest.mark.parametrize("test_input,expected_bool",(["./performance_diagram_actual.png", True], ["./plot_20200317_151252.points1", False])) -def test_plot_exists(setup, test_input, expected_bool): +@pytest.mark.parametrize("test_input,expected_bool",(["performance_diagram_actual.png", True], ["plot_20200317_151252.points1", False])) +def test_plot_exists(setup, test_input, expected_bool, remove_files): ''' Checking that only the plot file is getting created and the .point1 data file is not (dump_points_1 is 'False' in the test config file) ''' - assert os.path.isfile(test_input) == expected_bool - # cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected_bool + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input,expected_bool",(["./performance_diagram_actual_points1.png", True], ["./intermed_files/plot_20200317_151252.points1", True])) -def test_files_exist(test_input, expected_bool): +def test_files_exist(setup_env, test_input, expected_bool, remove_files): ''' Checking that only the plot file is getting created and the .point1 data file is not (dump_points_1 is 'False' in the test config file) ''' - - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_performance_diagram_points1.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_performance_diagram_points1.yaml" try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Performance Diagram based on @@ -67,32 +54,25 @@ def test_files_exist(test_input, expected_bool): # or augment settings defined by the default config file. pd.main(custom_config_filename) - assert os.path.isfile(test_input) == expected_bool + assert os.path.isfile(f"{cwd}/{test_input}") == expected_bool + remove_files(cwd, ['performance_diagram_actual_points1.png', 'intermed_files/plot_20200317_151252.points1']) try: - path = os.getcwd() - plot_file = 'performance_diagram_actual_points1.png' - points_file = './intermed_files/plot_20200317_151252.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file)) - os.rmdir(os.path.join(path, './intermed_files')) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
+ os.rmdir(os.path.join(cwd, 'intermed_files')) + except OSError: pass -@pytest.mark.parametrize("test_input,expected_bool",(["./performance_diagram_defaultpoints1.png", True], ["./plot_20200317_151252.points1", True])) -def test_files_exist(test_input, expected_bool): +@pytest.mark.parametrize("test_input,expected_bool", (["performance_diagram_defaultpoints1.png", True], ["plot_20200317_151252.points1", True])) +def test_files_exist(setup_env, test_input, expected_bool, remove_files): ''' Checking that only the plot file is getting created and the .point1 data file is not (dump_points_1 is 'False' in the test config file) ''' - - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_performance_diagram_defaultpoints1.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_performance_diagram_defaultpoints1.yaml" try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Performance Diagram based on @@ -101,34 +81,26 @@ def test_files_exist(test_input, expected_bool): # or augment settings defined by the default config file. pd.main(custom_config_filename) - assert os.path.isfile(test_input) == expected_bool + assert os.path.isfile(f"{cwd}/{test_input}") == expected_bool + remove_files(cwd, ['performance_diagram_defaultpoints1.png', 'plot_20200317_151252.points1']) try: - path = os.getcwd() - plot_file = 'performance_diagram_defaultpoints1.png' - points_file = 'plot_20200317_151252.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file)) - os.rmdir(os.path.join(path,'./intermed_files')) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. + os.rmdir(os.path.join(cwd, 'intermed_files')) + except OSError: pass @pytest.mark.skip() -def test_images_match(setup): +def test_images_match(setup, remove_files): ''' Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. 
''' - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_performance_diagram.yaml" + custom_config_filename = f"{cwd}/custom_performance_diagram.yaml" pd.main(custom_config_filename) - path = os.getcwd() plot_file = 'performance_diagram_actual.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./performance_diagram_expected.png',actual_file) + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/performance_diagram_expected.png', actual_file) assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) diff --git a/test/reliability_diagram/custom_reliability_diagram.yaml b/test/reliability_diagram/custom_reliability_diagram.yaml index be310b07..30c8e6bb 100644 --- a/test/reliability_diagram/custom_reliability_diagram.yaml +++ b/test/reliability_diagram/custom_reliability_diagram.yaml @@ -81,7 +81,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./custom_reliability_diagram.png +plot_filename: !ENV '${TEST_DIR}/custom_reliability_diagram.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -111,7 +111,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: ./plot_20210311_145053.data +stat_input: !ENV '${TEST_DIR}/plot_20210311_145053.data' summary_curves: [] sync_yaxes: 'False' title: test title diff --git a/test/reliability_diagram/custom_reliability_points1.yaml b/test/reliability_diagram/custom_reliability_points1.yaml index c2ba8608..adcbb785 100644 --- a/test/reliability_diagram/custom_reliability_points1.yaml +++ b/test/reliability_diagram/custom_reliability_points1.yaml @@ -96,7 +96,7 @@ plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null rely_event_hist: 'True' series_line_style: @@ -180,8 +180,8 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -plot_filename: ./intermed_files/reliability.png -stat_input: ./reliability.data +plot_filename: !ENV '${TEST_DIR}/intermed_files/reliability.png' +stat_input: !ENV '${TEST_DIR}/reliability.data' noskill_line_col: 'green' reference_line_col: 'blue' show_legend: diff --git a/test/reliability_diagram/custom_reliability_use_defaults.yaml b/test/reliability_diagram/custom_reliability_use_defaults.yaml index 344a6bb1..b2a2d711 100644 --- a/test/reliability_diagram/custom_reliability_use_defaults.yaml +++ b/test/reliability_diagram/custom_reliability_use_defaults.yaml @@ -179,8 +179,8 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -plot_filename: ./reliability.png -stat_input: ./reliability.data +plot_filename: !ENV '${TEST_DIR}/reliability.png' +stat_input: !ENV '${TEST_DIR}/reliability.data' show_legend: - 'True' - 'True' diff --git a/test/reliability_diagram/test_reliability_diagram.py b/test/reliability_diagram/test_reliability_diagram.py index a6413636..0ceeb222 100644 --- a/test/reliability_diagram/test_reliability_diagram.py +++ b/test/reliability_diagram/test_reliability_diagram.py @@ -4,81 +4,62 @@ from metplotpy.plots.reliability_diagram import reliability as r #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['reliability.png', 'reliability.points1'] + @pytest.fixture -def setup(): +def setup(setup_env, remove_files): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the 
default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_reliability_use_defaults.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_reliability_use_defaults.yaml" # Invoke the command to generate a Performance Diagram based on # the test_custom_performance_diagram.yaml custom config file. r.main(custom_config_filename) -def cleanup(): - # remove the line.png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'reliability.png' - points_file_1 = 'reliability.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - - @pytest.mark.parametrize("test_input,expected", - (["./reliability.png", True], ["./reliability.points1", True])) -def test_files_exist(setup, test_input, expected): + ([CLEANUP_FILES[0], True], [CLEANUP_FILES[1], True])) +def test_files_exist(setup, test_input, expected, remove_files): ''' Checking that the plot and data files are getting created ''' - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("depends on machine on which this is run") -def test_images_match(setup): +def test_images_match(setup, remove_files): ''' Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. ''' - comparison = CompareImages('reliability.png', 'reliability_expected.png') + comparison = CompareImages(f'{cwd}/{CLEANUP_FILES[0]}', f'{cwd}/{CLEANUP_FILES[1]}') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.parametrize("test_input,expected", - (["./intermed_files/reliability.png", True], ["./intermed_files/reliability.points1", True])) -def test_files_exist(test_input, expected): + (["intermed_files/reliability.png", True], ["intermed_files/reliability.points1", True])) +def test_files_exist(setup_env, test_input, expected, remove_files): ''' Checking that the plot and data files are getting created ''' try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./custom_reliability_points1.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_reliability_points1.yaml" r.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, ['intermed_files/reliability.png', 'intermed_files/reliability.points1']) try: - path = os.getcwd() - plot_file = 'reliability.png' - points_file = 'reliability.points1' - subdir = os.path.join(path, './intermed_files') - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
+ os.rmdir(os.path.join(cwd, 'intermed_files')) + except OSError: pass diff --git a/test/revision_box/custom2_revision_box.yaml b/test/revision_box/custom2_revision_box.yaml index 600dd96b..2334f4f8 100644 --- a/test/revision_box/custom2_revision_box.yaml +++ b/test/revision_box/custom2_revision_box.yaml @@ -82,7 +82,7 @@ plot_caption: '' plot_disp: - 'True' - 'True' -plot_filename: ./intermed_files/revision_box.png +plot_filename: !ENV '${TEST_DIR}/intermed_files/revision_box.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -99,7 +99,7 @@ series_val_1: model: - AFWAOCv3.5.1_d01 - NoahMPv3.5.1_d01 -stat_input: ./revision_box.data +stat_input: !ENV '${TEST_DIR}/revision_box.data' title: test title title_align: 0.5 title_offset: -2 @@ -130,7 +130,7 @@ ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' show_legend: - 'True' - 'True' \ No newline at end of file diff --git a/test/revision_box/custom_revision_box.yaml b/test/revision_box/custom_revision_box.yaml index 55760b48..fed2f274 100644 --- a/test/revision_box/custom_revision_box.yaml +++ b/test/revision_box/custom_revision_box.yaml @@ -82,7 +82,7 @@ plot_caption: '' plot_disp: - 'True' - 'True' -plot_filename: ./revision_box.png +plot_filename: !ENV '${TEST_DIR}/revision_box.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -99,7 +99,7 @@ series_val_1: model: - AFWAOCv3.5.1_d01 - NoahMPv3.5.1_d01 -stat_input: ./revision_box.data +stat_input: !ENV '${TEST_DIR}/revision_box.data' title: test title title_align: 0.5 title_offset: -2 diff --git a/test/revision_box/test_revision_box.py b/test/revision_box/test_revision_box.py index 44760e65..6932327f 100644 --- a/test/revision_box/test_revision_box.py +++ b/test/revision_box/test_revision_box.py @@ -3,85 +3,60 @@ from metplotpy.plots.revision_box import revision_box #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['revision_box.png', 'revision_box.points1'] + @pytest.fixture -def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_revision_box.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_revision_box.yaml" # Invoke the command to generate a Revision Box plot based on # the custom_revision_box.yaml custom config file. revision_box.main(custom_config_filename) -def cleanup(): - # remove the .png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'revision_box.png' - points_file_1 = 'revision_box.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as er: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - - @pytest.mark.parametrize("test_input, expected", - (["./revision_box.png", True],["./revision_box.points1", True])) -def test_files_exist( setup, test_input, expected): + (["revision_box.png", True], ["revision_box.points1", True])) +def test_files_exist(setup, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("fails on linux hosts") -def test_images_match(setup): +def test_images_match(setup, remove_files): """ Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. """ - comparison = CompareImages('./revision_box_expected.png', './revision_box.png') + comparison = CompareImages(f'{cwd}/revision_box_expected.png', f'{cwd}/revision_box.png') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input, expected", - (["./intermed_files/revision_box.png", True], ["./intermed_files/revision_box.points1", True])) -def test_files_exist(test_input, expected): + (["intermed_files/revision_box.png", True], ["intermed_files/revision_box.points1", True])) +def test_files_exist(setup_env, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ + setup_env(cwd) try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom2_revision_box.yaml" + + custom_config_filename = f"{cwd}/custom2_revision_box.yaml" # Invoke the command to generate a Bar plot based on # the custom_ens_ss.yaml custom config file. revision_box.main(custom_config_filename) - assert os.path.isfile(test_input) == expected - cleanup() - try: - path = os.getcwd() - subdir = os.path.join(path, "./intermed_files") - plot_file = 'revision_box.png' - points_file_1 = 'revision_box.points1' - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) diff --git a/test/revision_series/custom2_revision_series.yaml b/test/revision_series/custom2_revision_series.yaml index 59778bdf..9c05a30b 100644 --- a/test/revision_series/custom2_revision_series.yaml +++ b/test/revision_series/custom2_revision_series.yaml @@ -69,7 +69,7 @@ plot_caption: '' plot_disp: - 'True' - 'True' -plot_filename: ./intermed_files/revision_series.png +plot_filename: !ENV '${TEST_DIR}/intermed_files/revision_series.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -95,7 +95,7 @@ series_val_1: - NoahMPv3.5.1_d01 start_from_zero: 'False' -stat_input: ./revision_series.data +stat_input: !ENV '${TEST_DIR}/revision_series.data' title: Revision series title_align: 0.5 title_offset: -2 @@ -125,7 +125,7 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' show_legend: - 'True' - 'True' \ No newline at end of file diff --git a/test/revision_series/custom_revision_series.yaml b/test/revision_series/custom_revision_series.yaml index d84bbe34..30ddf814 100644 --- a/test/revision_series/custom_revision_series.yaml +++ b/test/revision_series/custom_revision_series.yaml @@ -69,7 +69,7 @@ plot_caption: '' plot_disp: - 'True' - 'True' -plot_filename: ./revision_series.png +plot_filename: !ENV '${TEST_DIR}/revision_series.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -95,7 +95,7 @@ series_val_1: - NoahMPv3.5.1_d01 start_from_zero: 'False' -stat_input: ./revision_series.data +stat_input: !ENV '${TEST_DIR}/revision_series.data' title: Revision series title_align: 0.5 title_offset: -2 diff --git a/test/revision_series/test_revision_series.py b/test/revision_series/test_revision_series.py index 5eb4e6ec..c381900a 100644 --- a/test/revision_series/test_revision_series.py +++ b/test/revision_series/test_revision_series.py @@ -3,47 +3,34 @@ from metplotpy.plots.revision_series import revision_series #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ('revision_series.png', 'revision_series.points1') + @pytest.fixture -def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom_revision_series.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/custom_revision_series.yaml" # Invoke the command to generate a Revision Series plot based on # the custom_revision_series.yaml custom config file. revision_series.main(custom_config_filename) -def cleanup(): - # remove the .png and .points files - # from any previous runs - try: - path = os.getcwd() - plot_file = 'revision_series.png' - points_file_1 = 'revision_series.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as er: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass - - @pytest.mark.parametrize("test_input, expected", - (["./revision_series.png", True],["./revision_series.points1", True])) -def test_files_exist( setup, test_input, expected): + (["revision_series.png", True], ["revision_series.points1", True])) +def test_files_exist(setup, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ - assert os.path.isfile(test_input) == expected - cleanup() + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) + @pytest.mark.skip("fails on linux hosts") -def test_images_match(setup): +def test_images_match(setup, remove_files): """ Compare an expected plot with the newly created plot to verify that the plot hasn't @@ -51,37 +38,27 @@ def test_images_match(setup): """ comparison = CompareImages('./revision_series_expected.png', './revision_series.png') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input, expected", - (["./intermed_files/revision_series.png", True], ["./intermed_files/revision_series.points1", True])) -def test_files_exist(test_input, expected): + (["intermed_files/revision_series.png", True], + ["intermed_files/revision_series.points1", True])) +def test_files_exist(setup_env, test_input, expected, remove_files): """ Checking that the plot and data files are getting created """ try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "custom2_revision_series.yaml" + + setup_env(cwd) + custom_config_filename = f"{cwd}/custom2_revision_series.yaml" # Invoke the command to generate a Bar plot based on # the custom_ens_ss.yaml custom config file. revision_series.main(custom_config_filename) - assert os.path.isfile(test_input) == expected - cleanup() - try: - path = os.getcwd() - subdir = os.path.join(path, "./intermed_files") - plot_file = 'revision_series.png' - points_file_1 = 'revision_series.points1' - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) diff --git a/test/roc_diagram/CTC_ROC.yaml b/test/roc_diagram/CTC_ROC.yaml index 57eccbea..f4a72858 100644 --- a/test/roc_diagram/CTC_ROC.yaml +++ b/test/roc_diagram/CTC_ROC.yaml @@ -62,7 +62,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./CTC_ROC.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -88,7 +88,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: CTC_ROC.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/roc_diagram/CTC_ROC_ee.yaml b/test/roc_diagram/CTC_ROC_ee.yaml index 94103e4d..4391d2fd 100644 --- a/test/roc_diagram/CTC_ROC_ee.yaml +++ b/test/roc_diagram/CTC_ROC_ee.yaml @@ -62,7 +62,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./CTC_ROC_ee.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC_ee.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -88,7 +88,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: CTC_ROC.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/roc_diagram/CTC_ROC_summary.yaml b/test/roc_diagram/CTC_ROC_summary.yaml index bc5f4c23..e24ca49a 100644 --- a/test/roc_diagram/CTC_ROC_summary.yaml +++ b/test/roc_diagram/CTC_ROC_summary.yaml @@ -77,7 +77,7 @@ plot_disp: - 'True' - 'True' - 'True' -plot_filename: ./CTC_ROC_summary.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC_summary.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -123,7 +123,7 @@ show_signif: - 'False' - 'False' start_from_zero: 'False' -stat_input: CTC_ROC_summary.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC_summary.data' summary_curve: median sync_yaxes: 'False' title: test title @@ -174,5 +174,5 @@ ytlab_horiz: 0.5 ytlab_orient: 1 ytlab_perp: 0.5 ytlab_size: 1 -points_path: './intermed_files' +points_path: !ENV '${TEST_DIR}/intermed_files' diff --git a/test/roc_diagram/CTC_ROC_thresh.yaml b/test/roc_diagram/CTC_ROC_thresh.yaml index 61a2dc76..7bbc26ca 100644 --- a/test/roc_diagram/CTC_ROC_thresh.yaml +++ b/test/roc_diagram/CTC_ROC_thresh.yaml @@ -64,7 +64,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./CTC_ROC_thresh.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC_thresh.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -90,7 +90,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: CTC_ROC_thresh.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC_thresh.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/roc_diagram/CTC_ROC_thresh_dump_pts.yaml b/test/roc_diagram/CTC_ROC_thresh_dump_pts.yaml index 41ae9784..3432b2ae 100644 --- a/test/roc_diagram/CTC_ROC_thresh_dump_pts.yaml +++ b/test/roc_diagram/CTC_ROC_thresh_dump_pts.yaml @@ -64,14 +64,14 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./CTC_ROC_thresh_dump_pts.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC_thresh_dump_pts.png' plot_height: 8.5 plot_res: 72 plot_stat: median plot_type: png16m plot_units: in plot_width: 11.0 -points_path: './intermed_files' +points_path: !ENV '${TEST_DIR}/intermed_files' random_seed: null reverse_connection_order: 'True' roc_ctc: true @@ -91,7 +91,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: CTC_ROC_thresh.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC_thresh.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git 
a/test/roc_diagram/CTC_ROC_thresh_reverse_pts.yaml b/test/roc_diagram/CTC_ROC_thresh_reverse_pts.yaml index ce8717b2..d6d0e4a7 100644 --- a/test/roc_diagram/CTC_ROC_thresh_reverse_pts.yaml +++ b/test/roc_diagram/CTC_ROC_thresh_reverse_pts.yaml @@ -64,7 +64,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./CTC_ROC_thresh.png +plot_filename: !ENV '${TEST_DIR}/CTC_ROC_thresh.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -90,7 +90,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: CTC_ROC_thresh.data +stat_input: !ENV '${TEST_DIR}/CTC_ROC_thresh.data' sync_yaxes: 'False' title: test title title_align: 0.5 diff --git a/test/roc_diagram/CTC_wind_reformatted.yaml b/test/roc_diagram/CTC_wind_reformatted.yaml new file mode 100644 index 00000000..3874c2d2 --- /dev/null +++ b/test/roc_diagram/CTC_wind_reformatted.yaml @@ -0,0 +1,144 @@ +--- +add_point_thresholds: 'True' +alpha: 0.05 +box_avg: 'False' +box_boxwex: 0.2 +box_notch: 'False' +box_outline: 'True' +box_pts: 'False' +caption_align: 0.0 +caption_col: '#333333' +caption_offset: 3.0 +caption_size: 0.8 +caption_weight: 1 +cex: 1 +circular_block_bootstrap: 'True' +colors: + - '#ff0000' +con_series: + - 1 +create_html: 'True' +derived_series_1: [] +derived_series_2: [] +dump_points_1: 'False' +dump_points_2: 'False' +event_equal: 'False' +fcst_var_val_1: null +fcst_var_val_2: null +fixed_vars_vals_input: {} +grid_col: '#cccccc' +grid_lty: 3 +grid_lwd: 1 +grid_on: 'True' +grid_x: listX +indy_label: [] +indy_stagger_1: 'False' +indy_stagger_2: 'False' +indy_vals: [] +indy_var: '' +legend_box: o +legend_inset: + x: 0.0 + y: -0.25 +legend_ncol: 3 +legend_size: 0.8 +line_type: None +list_stat_1: [] +list_stat_2: [] +list_static_val: + fcst_var: '' +mar: + - 8 + - 4 + - 5 + - 4 +method: bca +mgp: + - 1 + - 1 + - 0 +num_iterations: 1 +num_threads: -1 +plot_caption: '' +plot_ci: + - none +plot_disp: + - 'True' +plot_filename: !ENV '${TEST_DIR}/CTC_wind_reformatted.png' +plot_height: 8.5 +plot_res: 72 +plot_stat: median +plot_type: png16m +plot_units: in +plot_width: 11.0 +random_seed: null +reverse_connection_order: 'False' +roc_ctc: true +roc_pct: false +series_line_style: + - '-' +series_line_width: + - 1 +series_order: + - 1 +series_symbols: + - . 
+series_type: + - b +series_val_1: {} +series_val_2: {} +show_nstats: 'False' +show_signif: + - 'False' +stat_input: !ENV '${TEST_DIR}/ctc_reformatted_wind_P500.data' +sync_yaxes: 'False' +title: test title +title_align: 0.5 +title_offset: -2 +title_size: 1.4 +title_weight: 2.0 +user_legend: [] +variance_inflation_factor: 'False' +vert_plot: 'False' +x2lab_align: 0.5 +x2lab_offset: -0.5 +x2lab_size: 0.8 +x2lab_weight: 1 +x2tlab_horiz: 0.5 +x2tlab_orient: 1 +x2tlab_perp: 1 +x2tlab_size: 0.8 +xaxis: test x_label +xaxis_reverse: 'False' +xlab_align: 0.5 +xlab_offset: 2 +xlab_size: 1 +xlab_weight: 1 +xlim: [] +xtlab_decim: 0 +xtlab_horiz: 0.5 +xtlab_orient: 1 +xtlab_perp: -0.75 +xtlab_size: 1 +y2lab_align: 0.5 +y2lab_offset: 1 +y2lab_size: 1 +y2lab_weight: 1 +y2lim: [] +y2tlab_horiz: 0.5 +y2tlab_orient: 1 +y2tlab_perp: 1 +y2tlab_size: 1.0 +yaxis_1: test y_label +yaxis_2: '' +ylab_align: 0.5 +ylab_offset: -2 +ylab_size: 1 +ylab_weight: 1 +ylim: [] +ytlab_horiz: 0.5 +ytlab_orient: 1 +ytlab_perp: 0.5 +ytlab_size: 1 +show_legend: +- 'True' \ No newline at end of file diff --git a/test/roc_diagram/PCT_ROC.yaml b/test/roc_diagram/PCT_ROC.yaml index 4020f310..8d0b8b64 100644 --- a/test/roc_diagram/PCT_ROC.yaml +++ b/test/roc_diagram/PCT_ROC.yaml @@ -62,7 +62,7 @@ plot_ci: - none plot_disp: - 'True' -plot_filename: ./PCT_ROC.png +plot_filename: !ENV '${TEST_DIR}/PCT_ROC.png' plot_height: 8.5 plot_res: 72 plot_stat: median @@ -88,7 +88,7 @@ series_val_2: {} show_nstats: 'False' show_signif: - 'False' -stat_input: PCT_ROC.data +stat_input: !ENV '${TEST_DIR}/PCT_ROC.data' sync_yaxes: 'False' title: test title PCT ROC data diff --git a/test/roc_diagram/ctc_reformatted_wind_P500.data b/test/roc_diagram/ctc_reformatted_wind_P500.data new file mode 100644 index 00000000..6669fb90 --- /dev/null +++ b/test/roc_diagram/ctc_reformatted_wind_P500.data @@ -0,0 +1,15 @@ + version model desc fcst_lead fcst_valid_beg fcst_valid_end fcst_init_beg obs_lead obs_valid_beg obs_valid_end fcst_var fcst_units fcst_lev obs_var obs_units obs_lev obtype vx_mask interp_mthd interp_pnts fcst_thresh obs_thresh cov_thresh alpha line_type total ec_value fn_on fn_oy fy_on fy_oy +185 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=10.288 >=10.288 -9999 -9999 CTC 65 0.5 18.0 2.0 2.0 43.0 +186 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=15.433 >=15.433 -9999 -9999 CTC 65 0.5 40.0 2.0 5.0 18.0 +187 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=2.572 >=2.572 -9999 -9999 CTC 65 0.5 0.0 3.0 0.0 62.0 +188 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=2.572&&<5.144 >=2.572&&<5.144 -9999 -9999 CTC 65 0.5 56.0 5.0 2.0 2.0 +189 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=20.577 >=20.577 -9999 -9999 CTC 65 0.5 52.0 2.0 0.0 11.0 +190 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 
0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=25.722 >=25.722 -9999 -9999 CTC 65 0.5 58.0 0.0 0.0 7.0 +191 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 0 2021-05-12 12:00:00 2021-05-12 12:00:00 2021-05-12 12:00:00 0 2021-05-12 11:30:01 2021-05-12 12:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=5.144 >=5.144 -9999 -9999 CTC 65 0.5 5.0 2.0 2.0 56.0 +510 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=10.288 >=10.288 -9999 -9999 CTC 1 0.5 1.0 0.0 0.0 0.0 +511 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=15.433 >=15.433 -9999 -9999 CTC 1 0.5 1.0 0.0 0.0 0.0 +512 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=2.572 >=2.572 -9999 -9999 CTC 1 0.5 0.0 0.0 0.0 1.0 +513 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=2.572&&<5.144 >=2.572&&<5.144 -9999 -9999 CTC 1 0.5 0.0 0.0 0.0 1.0 +514 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=20.577 >=20.577 -9999 -9999 CTC 1 0.5 1.0 0.0 0.0 0.0 +515 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=25.722 >=25.722 -9999 -9999 CTC 1 0.5 1.0 0.0 0.0 0.0 +516 V10.1.1 FV3_GFS_v15p2_CONUS_25km_mem001 60000 2021-05-12 18:00:00 2021-05-12 18:00:00 2021-05-12 12:00:00 0 2021-05-12 17:30:01 2021-05-12 18:30:00 WIND m/s P500 WIND P500 ADPUPA CONUS BILIN 4 >=5.144 >=5.144 -9999 -9999 CTC 1 0.5 1.0 0.0 0.0 0.0 diff --git a/test/roc_diagram/custom_roc_diagram.yaml b/test/roc_diagram/custom_roc_diagram.yaml index 1b5a53a3..3a1e463d 100644 --- a/test/roc_diagram/custom_roc_diagram.yaml +++ b/test/roc_diagram/custom_roc_diagram.yaml @@ -98,8 +98,8 @@ reverse_connection_order: False # Make the plot generated in METviewer interactive create_html: 'True' -stat_input: ./plot_20200507_074426.data -plot_filename: ./roc_diagram_custom.png +stat_input: !ENV '${TEST_DIR}/plot_20200507_074426.data' +plot_filename: !ENV '${TEST_DIR}/roc_diagram_custom.png' # To save your log output to a file, specify a path and filename and uncomment the line below. Make sure you have # permissions to the directory you specify. The default, as specified in the default config file is stdout. diff --git a/test/roc_diagram/test_roc_diagram.py b/test/roc_diagram/test_roc_diagram.py index c1c6ea0a..a5c22d37 100644 --- a/test/roc_diagram/test_roc_diagram.py +++ b/test/roc_diagram/test_roc_diagram.py @@ -7,17 +7,17 @@ # from metcalcpy.compare_images import CompareImages import metcalcpy.util.ctc_statistics as ctc +cwd = os.path.dirname(__file__) + # Fixture used for the image comparison # test. 
@pytest.fixture -def setup(): +def setup(setup_env): # Cleanup the plotfile and point1 output file from any previous run cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./CTC_ROC_thresh.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/CTC_ROC_thresh.yaml" # print("\n current directory: ", os.getcwd()) # print("\ncustom config file: ", custom_config_filename, '\n') @@ -27,14 +27,12 @@ def setup(): @pytest.fixture -def setup_rev_points(): +def setup_rev_points(setup_env): # Cleanup the plotfile and point1 output file from any previous run - path = os.getcwd() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./CTC_ROC_thresh_reverse_pts.yaml" - print("\n current directory: ", os.getcwd()) + cleanup() + setup_env(cwd) + custom_config_filename = f"{cwd}/CTC_ROC_thresh_reverse_pts.yaml" + print("\n current directory: ", cwd) print("\ncustom config file: ", custom_config_filename, '\n') # Invoke the command to generate a Performance Diagram based on @@ -43,20 +41,15 @@ def setup_rev_points(): @pytest.fixture -def setup_summary(): +def setup_summary(setup_env): # Cleanup the plotfile and point1 output file from any previous run - path = os.getcwd() - - subdir_path = os.path.join(path, 'intermed_files') - - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./CTC_ROC_summary.yaml" + cleanup() + setup_env(cwd) + custom_config_filename = f"{cwd}/CTC_ROC_summary.yaml" try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Performance Diagram based on @@ -65,22 +58,19 @@ def setup_summary(): @pytest.fixture -def setup_dump_points(): +def setup_dump_points(setup_env): # Cleanup the plotfile and point1 output file from any previous run - path = os.getcwd() + cleanup() + setup_env(cwd) # put any intermediate files in the intermed_files subdirectory of this current # working directory. *NOTE* This MUST match with what you set up in CTC_ROC_thresh.yaml for the # points_path configuration setting. - subdir_path = os.path.join(path, 'intermed_files') - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. 
- os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./CTC_ROC_thresh_dump_pts.yaml" + custom_config_filename = f"{cwd}/CTC_ROC_thresh_dump_pts.yaml" # print("\n current directory: ", os.getcwd()) # print("\ncustom config file: ", custom_config_filename, '\n') try: - os.mkdir(os.path.join(os.getcwd(), 'intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Performance Diagram based on @@ -93,28 +83,25 @@ def cleanup(): # from any previous runs # The subdir_path is where the .points1 file will be stored try: - path = os.getcwd() plot_file = 'CTC_ROC_thresh.png' - html_file = '.html' - os.remove(os.path.join(path, html_file)) - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. + html_file = 'CTC_ROC_thresh.html' + os.remove(os.path.join(cwd, html_file)) + os.remove(os.path.join(cwd, plot_file)) + except OSError: pass @pytest.mark.parametrize("test_input,expected_boolean", - (["./CTC_ROC_thresh_expected.png", True], ["./CTC_ROC_thresh.points1", False])) + (["CTC_ROC_thresh_expected.png", True], ["CTC_ROC_thresh.points1", False])) def test_files_exist(setup, test_input, expected_boolean): ''' Checking that the plot file is getting created but the points1 file is NOT ''' - assert os.path.isfile(test_input) == expected_boolean + assert os.path.isfile(f"{cwd}/{test_input}") == expected_boolean cleanup() -def test_expected_CTC_thresh_dump_points(setup_dump_points): +def test_expected_ctc_thresh_dump_points(setup_dump_points, remove_files): ''' For test data, verify that the points in the .points1 file are in the directory we specified and the values @@ -123,48 +110,25 @@ def test_expected_CTC_thresh_dump_points(setup_dump_points): ''' expected_pody = pd.Series([1, 0.8457663, 0.7634846, 0.5093934, 0.1228585, 0]) expected_pofd = pd.Series([1, 0.0688293, 0.049127, 0.0247044, 0.0048342, 0]) - df = pd.read_csv("./intermed_files/CTC_ROC_thresh.points1", sep='\t', header='infer') + df = pd.read_csv(f"{cwd}/intermed_files/CTC_ROC_thresh.points1", sep='\t', header='infer') pofd = df.iloc[:, 0] pody = df.iloc[:, 1] for index, expected in enumerate(expected_pody): - if ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0 # if we get here, then all elements matched in value and position - assert True # do the same test for pofd for index, expected in enumerate(expected_pofd): - if ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0 # different cleanup than what is provided by cleanup() # clean up the intermediate subdirectory and other files - try: - path = os.getcwd() - plot_file = 'CTC_ROC_thresh_dump_pts.png' - html_file = '.html' - points_file = 'CTC_ROC_thresh.points1' - intermed_path = os.path.join(path, "intermed_files") - os.remove(os.path.join(intermed_path, points_file)) - os.rmdir(intermed_path) - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, html_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
- pass + remove_files(cwd, ['CTC_ROC_thresh_dump_pts.png', 'CTC_ROC_thresh_dump_pts.html']) - # if we get here, then all elements matched in value and position - assert True - -def test_expected_CTC_summary(setup_summary): +def test_expected_ctc_summary(setup_summary, remove_files): ''' For test data, verify that the points in the .points1 file are in the directory we specified and the values @@ -174,48 +138,25 @@ def test_expected_CTC_summary(setup_summary): expected_pofd = pd.Series([1, 0.0052708, 0, 1, 0.0084788, 0, 1, 0.0068247, 0]) expected_pody = pd.Series([1, 0.0878715, 0, 1, 0.1166785, 0, 1, 0.1018776, 0]) - df = pd.read_csv("./intermed_files/CTC_ROC_summary.points1", sep='\t', header='infer') + df = pd.read_csv(f"{cwd}/intermed_files/CTC_ROC_summary.points1", sep='\t', header='infer') pofd = df.iloc[:, 0] pody = df.iloc[:, 1] for index, expected in enumerate(expected_pody): - if ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0 # if we get here, then all elements matched in value and position - assert True # do the same test for pofd for index, expected in enumerate(expected_pofd): - if ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0 # different cleanup than what is provided by cleanup() # clean up the intermediate subdirectory and other files - try: - path = os.getcwd() - plot_file = 'CTC_ROC_summary.png' - html_file = '.html' - points_file = 'CTC_ROC_summary.points1' - intermed_path = os.path.join(path, "intermed_files") - os.remove(os.path.join(intermed_path, points_file)) - os.rmdir(intermed_path) - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, html_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. 
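The refactor above (and throughout the rest of this patch) swaps the per-test `os.environ['METPLOTPY_BASE'] = "../../"` assignments and hand-rolled try/except cleanup blocks for shared `setup_env` and `remove_files` fixtures plus a module-level `cwd = os.path.dirname(__file__)`. Those fixtures are not defined in this hunk (they presumably live in a shared conftest.py), so the sketch below is only an assumption about their shape, included to make calls such as `setup_env(cwd)` and `remove_files(cwd, [...])` easier to read:

# Assumed sketch of the shared fixtures used by these tests (setup_env,
# remove_files); the real definitions live in a conftest.py outside this
# hunk, so names and behavior here are illustrative, not authoritative.
import os

import pytest


@pytest.fixture
def setup_env():
    """Return a callable that points the environment at a given test directory."""
    def _setup(test_dir):
        # METPLOTPY_BASE lets the plotting modules locate their default
        # config files; TEST_DIR feeds the !ENV paths in the updated YAML.
        os.environ['METPLOTPY_BASE'] = os.path.join(test_dir, '..', '..')
        os.environ['TEST_DIR'] = test_dir
    return _setup


@pytest.fixture
def remove_files():
    """Return a callable that deletes the listed files, ignoring missing ones."""
    def _remove(test_dir, filenames):
        for name in filenames:
            try:
                os.remove(os.path.join(test_dir, name))
            except OSError:
                pass
    return _remove

With fixtures along these lines, each test only names the files it produced, and the cleanup logic appears once instead of being repeated in every test body.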
- pass - - # if we get here, then all elements matched in value and position - assert True + remove_files(cwd, ['CTC_ROC_summary.png', 'CTC_ROC_summary.html']) -def test_expected_CTC_thresh_points_reversed(setup_rev_points): +def test_expected_ctc_thresh_points_reversed(setup_rev_points, remove_files): ''' For test data, verify that the points in the .points1 file match what is expected (within round-off tolerance/acceptable precision) when @@ -225,45 +166,26 @@ def test_expected_CTC_thresh_points_reversed(setup_rev_points): expected_pody = pd.Series([1, 0.8457663, 0.7634846, 0.5093934, 0.1228585, 0]) expected_pofd = pd.Series([1, 0.0688293, 0.0491275, 0.0247044, 0.0048342, 0]) - df = pd.read_csv("./CTC_ROC_thresh.points1", sep='\t', header='infer') + df = pd.read_csv(f"{cwd}/CTC_ROC_thresh.points1", sep='\t', header='infer') pofd = df.iloc[:, 0] pody = df.iloc[:, 1] for index, expected in enumerate(expected_pody): - if ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pody[index]) == 0.0 - # if we get here, then all elements matched in value and position - assert True + # if we get here, then all elements matched in value and position # do the same test for pofd for index, expected in enumerate(expected_pofd): - if ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0: - pass - else: - assert False + assert ctc.round_half_up(expected) - ctc.round_half_up(pofd[index]) == 0.0 - # if we get here, then all elements matched in value and position - assert True + # if we get here, then all elements matched in value and position # different cleanup than what is provided by cleanup() - try: - path = os.getcwd() - plot_file = 'CTC_ROC_thresh.png' - points_file = 'CTC_ROC_thresh.points1' - html_file = '.html' - os.remove(os.path.join(path, points_file)) - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, html_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + remove_files(cwd, ['CTC_ROC_thresh.png', 'CTC_ROC_thresh.points1', 'CTC_ROC_thresh.html']) -def test_ee_returns_empty_df(capsys): +def test_ee_returns_empty_df(capsys, remove_files): ''' use CTC_ROC.data with event equalization set to True. This will result in an empty data frame returned from event equalization. Check for @@ -274,7 +196,7 @@ def test_ee_returns_empty_df(capsys): ''' - custom_config_filename = "./CTC_ROC_ee.yaml" + custom_config_filename = f"{cwd}/CTC_ROC_ee.yaml" roc.main(custom_config_filename) captured = capsys.readouterr() expected = '\nINFO: No resulting data after performing event equalization of axis 1\n' \ @@ -284,18 +206,11 @@ def test_ee_returns_empty_df(capsys): assert captured.out == expected # Clean up - try: - path = os.getcwd() - plot_file = 'CTC_ROC_ee.png' - os.remove(os.path.join(path, plot_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + remove_files(cwd, ['CTC_ROC_ee.png', 'CTC_ROC_ee.html']) @pytest.mark.skip("skip image comparison") -def test_images_match(setup): +def test_images_match(setup, remove_files): ''' Compare an expected plot with the newly created plot to verify that the plot hasn't @@ -305,47 +220,36 @@ def test_images_match(setup): can sometimes be a different size than the expected (which was generated using the same configuration file and data!) 
''' - path = os.getcwd() - plot_file = './CTC_ROC_thresh.png' - actual_file = os.path.join(path, plot_file) - comparison = CompareImages('./CTC_ROC_thresh.png', actual_file) + plot_file = 'CTC_ROC_thresh.png' + actual_file = os.path.join(cwd, plot_file) + comparison = CompareImages(f'{cwd}/CTC_ROC_thresh.png', actual_file) assert comparison.mssim == 1 # cleanup # different cleanup than what is provided by cleanup() - try: - plot_file = 'CTC_ROC_thresh.png' - html_file = '.html' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, html_file)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + remove_files(cwd, ['CTC_ROC_thresh.png', 'CTC_ROC_thresh.html']) -def test_pct_plot_exists(): +def test_pct_plot_exists(remove_files): ''' Verify that the ROC diagram is generated ''' - custom_config_filename = "./PCT_ROC.yaml" - output_plot = "./PCT_ROC.png" + custom_config_filename = f"{cwd}/PCT_ROC.yaml" + output_plot = f"{cwd}/PCT_ROC.png" print("\n Testing for existence of PCT ROC plot...") roc.main(custom_config_filename) - assert os.path.isfile(output_plot) == True - os.remove(os.path.join(output_plot)) + assert os.path.isfile(output_plot) + remove_files(cwd, ['PCT_ROC.png', 'PCT_ROC.html']) -def test_pct_no_warnings(): +def test_pct_no_warnings(remove_files): ''' Verify that the ROC diagram is generated without FutureWarnings ''' - custom_config_filename = "./PCT_ROC.yaml" - output_plot = "./PCT_ROC.png" - + custom_config_filename = f"{cwd}/PCT_ROC.yaml" print("\n Testing for FutureWarning..") try: @@ -354,6 +258,25 @@ def test_pct_no_warnings(): print("FutureWarning generated") # FutureWarning generated, test fails assert False - else: - assert True - os.remove(os.path.join(output_plot)) + + remove_files(cwd, ['PCT_ROC.png', 'PCT_ROC.html']) + + +def test_ctc_reformatted(remove_files): + ''' + Verify that the ROC diagram is generated successfully + from reformatted CTC data. 
+ ''' + + custom_config_filename = f"{cwd}/CTC_wind_reformatted.yaml" + output_plot = f"{cwd}/CTC_wind_reformatted.png" + + print("\n Testing for presence of the CTC_wind_reformatted.png plot...") + + roc.main(custom_config_filename) + assert os.path.isfile(output_plot) + # Checking for plot size isn't reliable + #expected_filesize = int(43239) + #plot_filesize = int(os.path.getsize(output_plot)) + #assert plot_filesize >= expected_filesize + remove_files(cwd, ['CTC_wind_reformatted.png', 'CTC_wind_reformatted.html']) diff --git a/test/roc_diagram/test_roc_diagram.yaml b/test/roc_diagram/test_roc_diagram.yaml index cf85a3e1..4ccee027 100644 --- a/test/roc_diagram/test_roc_diagram.yaml +++ b/test/roc_diagram/test_roc_diagram.yaml @@ -92,6 +92,6 @@ caption_align: .1 #0 # axis parallel location adjustment # Make the plot generated in METviewer interactive create_html: 'True' # input file is relative to the roc_diagram.py module -stat_input: ./CTC_ROC_thresh.data -plot_filename: ./roc_diagram_actual.png +stat_input: !ENV '${TEST_DIR}/CTC_ROC_thresh.data' +plot_filename: !ENV '${TEST_DIR}/roc_diagram_actual.png' diff --git a/test/skew_t/test_skew_t.py b/test/skew_t/test_skew_t.py index d870b0e7..42e003ec 100644 --- a/test/skew_t/test_skew_t.py +++ b/test/skew_t/test_skew_t.py @@ -6,17 +6,12 @@ from metplotpy.plots.skew_t import skew_t as skew_t # from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) -def test_files_exist(): - ''' - Checking that only the expected plot files are getting created and - input files with only fill/missing data are not created. - ''' - - os.environ['METPLOTPY_BASE'] = "../../" - cur_dir = os.getcwd() - custom_config_filename = os.path.join(cur_dir, "test_skew_t.yaml") +def test_skew_t(setup_env): + setup_env(cwd) + custom_config_filename = os.path.join(cwd, "test_skew_t.yaml") # Invoke the command to generate a skew-T Diagram based on # the test_skew_tm.yaml custom config file. @@ -24,7 +19,7 @@ def test_files_exist(): # Verify that files for the ssh052023 data exists for the 0,6, 12,18,24, 30, 42, # 48, 54, and 60 hour data. - output_dir = os.path.join(cur_dir, 'output' ) + output_dir = os.path.join(cwd, 'output') # Some of these data files have incomplete data so check for the expected hour # plots. @@ -32,7 +27,7 @@ def test_files_exist(): print(f"Output dir: {output_dir}") file_ext = '.png' files_of_interest = [] - for root, dir, files in os.walk(output_dir): + for root, _, files in os.walk(output_dir): for item in files: if item.endswith(file_ext): # print(f"Item of interest: {item}") @@ -40,30 +35,37 @@ def test_files_exist(): base_file = os.path.basename(full_file) files_of_interest.append(base_file) - # List of files for the sh052023 data (which is missing data for hours 66-240). - data_some_missing_data = ['ssh052023_avno_doper_2023010100_diag', - 'ssh052023_avno_doper_2023010106_diag'] + _check_files_exist(files_of_interest) + _check_files_not_created(files_of_interest) + _check_empty_input(files_of_interest) + + # Clean up all png files + shutil.rmtree(output_dir) + # If running without the ' -p no:logging' option, then uncomment to ensure that log + # files are removed. + # shutil.rmtree('./logs') + +def _check_files_exist(files_of_interest): + ''' + Checking that only the expected plot files are getting created and + input files with only fill/missing data are not created. + ''' + # List of files for the sh052023 data (which is missing data for hours 66-240). 
# Config file is requesting all the available sounding hours - expected_hours_for_2023_010100 = range(0,61,6) - expected_hours_for_2023_010106 = range(0,49,6) + data_some_missing_data = { + '2023010100': range(0, 61, 6), + '2023010106': range(0, 49, 6), + } # Create a list of expected base file names with their expected hours. expected_base_filenames = [] - for base in data_some_missing_data: - # Expected base for expected plot output name of format: - # ssh_052023_avno_doper_202301010[0|6]_diag_[0-9]{1,2}_hr - if base == 'ssh052023_avno_doper_2023010100_diag': - # Working with the 2023010100 date file - for cur_hr in expected_hours_for_2023_010100: - base_hr = base + '_' + str(cur_hr) + '_hr' - expected_base_filenames.append(base_hr) - - elif base == 'ssh052023_avno_doper_2023010106_diag': - # Working with the 2023010106 date - for cur_hr in expected_hours_for_2023_010106: - base_hr = base + '_' + str(cur_hr) + '_hr' - expected_base_filenames.append(base_hr) + # Expected base for expected plot output name of format: + # ssh_052023_avno_doper_202301010[0|6]_diag_[0-9]{1,2}_hr + for filetime, expected_hours in data_some_missing_data.items(): + for cur_hr in expected_hours: + base_hr = f'ssh052023_avno_doper_{filetime}_diag_{cur_hr}_hr' + expected_base_filenames.append(base_hr) # Subset only the files that correspond to the sh052023 data subset_files_of_interest = [] @@ -78,51 +80,13 @@ def test_files_exist(): if expected in subset_files_of_interest: num_found += 1 - if len(subset_files_of_interest) == num_found: - assert True - else: - assert False + assert len(subset_files_of_interest) == num_found - # Clean up all png files - temp_datafile = os.path.join(cur_dir, 'sounding_data.dat') - os.remove(temp_datafile) - shutil.rmtree(output_dir) - # If running without the ' -p no:logging' option, then uncomment to ensure that log - # files are removed. - # shutil.rmtree('./logs') - -def test_files_not_created(): +def _check_files_not_created(files_of_interest): ''' Checking that input files with only fill/missing data are not created. ''' - - os.environ['METPLOTPY_BASE'] = "../../" - cur_dir = os.getcwd() - custom_config_filename = os.path.join(cur_dir, "test_skew_t.yaml") - - # Invoke the command to generate a skew-T Diagram based on - # the test_skew_tm.yaml custom config file. - skew_t.main(custom_config_filename) - - # Verify that files for the ssh052023 data exists for the 0,6, 12,18,24, 30, 42, - # 48, 54, and 60 hour data. - output_dir = os.path.join(cur_dir, 'output' ) - - # Some of these data files have incomplete data so check for the expected hour - # plots. - - print(f"Output dir: {output_dir}") - file_ext = '.png' - files_of_interest = [] - for root, dir, files in os.walk(output_dir): - for item in files: - if item.endswith(file_ext): - # print(f"Item of interest: {item}") - full_file = os.path.join(root, item) - base_file = os.path.basename(full_file) - files_of_interest.append(base_file) - # List of files with no sounding data (9999 for all fields and times) no_sounding_data = ['ssh162023_avno_doper_2023022712_diag', 'ssh162023_avno_doper_2023022800_diag', @@ -153,52 +117,13 @@ def test_files_not_created(): if cur in subsetted_basenames: fail_counter += 1 - if fail_counter == 0: - assert True - else: - assert False - - - # Clean up all png files - temp_datafile = os.path.join(cur_dir, 'sounding_data.dat') - os.remove(temp_datafile) - shutil.rmtree(output_dir) - # If running with the ' -p no:logging' option, then uncomment to ensure that log - # files are removed. 
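Several of the YAML configs touched by this patch (test_roc_diagram.yaml above; test_skew_t.yaml and the taylor_diagram configs below) replace relative `./` paths with `!ENV '${TEST_DIR}/...'`, so the tests no longer depend on the current working directory; TEST_DIR is exported by the `setup_env` fixture before the YAML is read. The actual tag resolution happens inside the project's configuration loader; the following is only a rough, assumed illustration of the general technique (a custom PyYAML constructor that expands environment variables), not the real implementation:

# Illustrative sketch only: one common way to resolve an !ENV tag that
# expands ${VAR} references from the environment using PyYAML. The real
# resolution in this project is handled by its own config loading code.
import os
import re

import yaml

_ENV_VAR = re.compile(r'\$\{([^}]+)\}')


class EnvLoader(yaml.SafeLoader):
    """SafeLoader that understands an !ENV tag for environment expansion."""


def _expand_env(loader, node):
    value = loader.construct_scalar(node)
    return _ENV_VAR.sub(lambda m: os.environ.get(m.group(1), ''), value)


EnvLoader.add_constructor('!ENV', _expand_env)


def load_config(path):
    with open(path) as fh:
        return yaml.load(fh, Loader=EnvLoader)

With TEST_DIR set to the test's own directory, a line such as `stat_input: !ENV '${TEST_DIR}/CTC_ROC_thresh.data'` resolves to an absolute path, which is what lets these tests run from any location.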
- # shutil.rmtree('./logs') + assert fail_counter == 0 -def test_empty_input(): +def _check_empty_input(files_of_interest): ''' Checking that empty input file is not creating any plots. ''' - - os.environ['METPLOTPY_BASE'] = "../../" - cur_dir = os.getcwd() - custom_config_filename = os.path.join(cur_dir, "test_skew_t.yaml") - - # Invoke the command to generate a skew-T Diagram based on - # the test_skew_tm.yaml custom config file. - skew_t.main(custom_config_filename) - - # Verify that files for the ssh052023 data exists for the 0,6, 12,18,24, 30, 42, - # 48, 54, and 60 hour data. - output_dir = os.path.join(cur_dir, 'output') - - # Some of these data files have incomplete data so check for the expected hour - # plots. - - print(f"Output dir: {output_dir}") - file_ext = '.png' - files_of_interest = [] - for root, dir, files in os.walk(output_dir): - for item in files: - if item.endswith(file_ext): - # print(f"Item of interest: {item}") - full_file = os.path.join(root, item) - base_file = os.path.basename(full_file) - files_of_interest.append(base_file) - # List of empty files no_data_empty_file = ['sal092022_avno_doper_2022092800_diag'] @@ -213,18 +138,6 @@ def test_empty_input(): subsetted_files_of_interest.append(cur) match_found = re.match(r'^sal092022_avno_doper_2022092800_diag', - no_data_empty_file[0]) - if match_found in subsetted_files_of_interest: - # The output file was created when it shouldn't have been, fail. - assert False - else: - # The output file was NOT created, as expected. Pass. - assert True - - # Clean up all png files - temp_datafile = os.path.join(cur_dir, 'sounding_data.dat') - os.remove(temp_datafile) - shutil.rmtree(output_dir) - # If running without the ' -p no:logging' option, then uncomment to ensure that log - # files are removed. - # shutil.rmtree('./logs') + no_data_empty_file[0]) + # The output file was created when it shouldn't have been, fail. + assert match_found not in subsetted_files_of_interest diff --git a/test/skew_t/test_skew_t.yaml b/test/skew_t/test_skew_t.yaml index 696d5817..858fc8e0 100644 --- a/test/skew_t/test_skew_t.yaml +++ b/test/skew_t/test_skew_t.yaml @@ -4,11 +4,11 @@ # Input and output file information -input_directory: './data/2023/' +input_directory: !ENV '${TEST_DIR}/data/2023/' input_file_extension: '.dat' -output_directory: ./output +output_directory: !ENV '${TEST_DIR}/output' log_level: "INFO" -log_directory: ./logs +log_directory: !ENV '${TEST_DIR}/logs' log_filename: 'tc_diags_skewt.log' # Sounding hours of interest. 
If all_sounding_hours is set to False, then the diff --git a/test/taylor_diagram/taylor_diagram_custom.yaml b/test/taylor_diagram/taylor_diagram_custom.yaml index 8d6a3523..26fce36e 100644 --- a/test/taylor_diagram/taylor_diagram_custom.yaml +++ b/test/taylor_diagram/taylor_diagram_custom.yaml @@ -1,8 +1,8 @@ --- # custom config file to override some of the settings in the # default config file taylor_diagram_defaults.yaml -stat_input: ./plot_dlwr_sample.data -plot_filename: ./taylor_diagram_custom.png +stat_input: !ENV '${TEST_DIR}/plot_dlwr_sample.data' +plot_filename: !ENV '${TEST_DIR}/taylor_diagram_custom.png' # change the caption text plot_caption: "Custom caption" diff --git a/test/taylor_diagram/test_neg_and_pos_corr.yaml b/test/taylor_diagram/test_neg_and_pos_corr.yaml index 0e166f23..454f1120 100644 --- a/test/taylor_diagram/test_neg_and_pos_corr.yaml +++ b/test/taylor_diagram/test_neg_and_pos_corr.yaml @@ -1,8 +1,8 @@ --- # custom config file to override some of the settings in the # default config file taylor_diagram_defaults.yaml -stat_input: ./plot_dlwr_sample.data -plot_filename: ./test_neg_and_pos_corr_plot.png +stat_input: !ENV '${TEST_DIR}/plot_dlwr_sample.data' +plot_filename: !ENV '${TEST_DIR}/test_neg_and_pos_corr_plot.png' taylor_show_gamma: True # Show only positive values of correlation taylor_voc: False diff --git a/test/taylor_diagram/test_pos_corr.yaml b/test/taylor_diagram/test_pos_corr.yaml index e30d4ec5..27c995fb 100644 --- a/test/taylor_diagram/test_pos_corr.yaml +++ b/test/taylor_diagram/test_pos_corr.yaml @@ -1,8 +1,8 @@ --- # custom config file to override some of the settings in the # default config file taylor_diagram_defaults.yaml -stat_input: ./plot_dlwr_sample.data -plot_filename: ./test_pos_corr_plot.png +stat_input: !ENV '${TEST_DIR}/plot_dlwr_sample.data' +plot_filename: !ENV '${TEST_DIR}/test_pos_corr_plot.png' taylor_show_gamma: True # Show only positive values of correlation taylor_voc: True \ No newline at end of file diff --git a/test/taylor_diagram/test_taylor_diagram.py b/test/taylor_diagram/test_taylor_diagram.py index 7006af0c..415bee98 100644 --- a/test/taylor_diagram/test_taylor_diagram.py +++ b/test/taylor_diagram/test_taylor_diagram.py @@ -2,39 +2,39 @@ from metplotpy.plots.taylor_diagram import taylor_diagram as td #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) -def test_pos_corr_file_exists(): - os.environ['METPLOTPY_BASE'] = "../../" - test_config_filename = "test_pos_corr.yaml" + +def test_pos_corr_file_exists(setup_env): + setup_env(cwd) + + test_config_filename = f"{cwd}/test_pos_corr.yaml" td.main(test_config_filename) # Verify that a plot was generated - plot_file = "test_pos_corr_plot.png" - expected_file = "expected_pos_corr_plot.png" - path = os.getcwd() - assert os.path.isfile(plot_file) == True - + plot_file = f"{cwd}/test_pos_corr_plot.png" + expected_file = f"{cwd}/expected_pos_corr_plot.png" + assert os.path.isfile(plot_file) # image comparison #comparison = CompareImages(plot_file,expected_file) # assert comparison.mssim >= .99 # Clean up - os.remove(os.path.join(path, plot_file)) + os.remove(plot_file) -def test_pos_corr_file_exists(): - os.environ['METPLOTPY_BASE'] = "../../" - test_config_filename = "test_pos_corr.yaml" +def test_pos_corr_file_exists(setup_env): + setup_env(cwd) + test_config_filename = f"{cwd}/test_pos_corr.yaml" td.main(test_config_filename) # Verify that a plot was generated - plot_file = "test_pos_corr_plot.png" - path = os.getcwd() - assert 
os.path.isfile(plot_file) == True + plot_file = f"{cwd}/test_pos_corr_plot.png" + assert os.path.isfile(plot_file) # Clean up - os.remove(os.path.join(path, plot_file)) + os.remove(plot_file) # Not reliable when the expected image is generated on a Mac and then this # test is run on non-Mac machine. @@ -55,47 +55,47 @@ def test_pos_corr_file_exists(): # # Clean up # os.remove(os.path.join(path, plot_file)) -def test_neg_and_pos_corr_file_exists(): - os.environ['METPLOTPY_BASE'] = "../../" - test_config_filename = "test_neg_and_pos_corr.yaml" + +def test_neg_and_pos_corr_file_exists(setup_env): + setup_env(cwd) + test_config_filename = f"{cwd}/test_neg_and_pos_corr.yaml" td.main(test_config_filename) # Verify that a plot was generated - plot_file = "test_neg_and_pos_corr_plot.png" - path = os.getcwd() - assert os.path.isfile(plot_file) == True + plot_file = f"{cwd}/test_neg_and_pos_corr_plot.png" + assert os.path.isfile(plot_file) # Clean up - os.remove(os.path.join(path, plot_file)) + os.remove(plot_file) + # Not reliable when the expected image is generated on a Mac and then this # test is run on non-Mac machine. -def test_neg_and_pos_corr_images_match(): - os.environ['METPLOTPY_BASE'] = "../../" - test_config_filename = "test_neg_and_pos_corr.yaml" +def test_neg_and_pos_corr_images_match(setup_env): + setup_env(cwd) + test_config_filename = f"{cwd}/test_neg_and_pos_corr.yaml" td.main(test_config_filename) # Verify that a plot was generated - plot_file = "test_neg_and_pos_corr_plot.png" - expected_file = "expected_neg_and_pos_corr_plot.png" - path = os.getcwd() + plot_file = f"{cwd}/test_neg_and_pos_corr_plot.png" + expected_file = f"{cwd}/expected_neg_and_pos_corr_plot.png" # image comparison, with allowance of .99 match instead of 100% match #comparison = CompareImages(plot_file, expected_file) #assert comparison.mssim >= .99 # Clean up - os.remove(os.path.join(path, plot_file)) + os.remove(plot_file) + -def test_custom_plot_exists(): - os.environ['METPLOTPY_BASE'] = "../../" - test_config_filename = "taylor_diagram_custom.yaml" +def test_custom_plot_exists(setup_env): + setup_env(cwd) + test_config_filename = f"{cwd}/taylor_diagram_custom.yaml" td.main(test_config_filename) # Verify that a plot was generated - plot_file = "./taylor_diagram_custom.png" - path = os.getcwd() - assert os.path.isfile(plot_file) == True + plot_file = f"{cwd}/taylor_diagram_custom.png" + assert os.path.isfile(plot_file) # Clean up - os.remove(os.path.join(path, plot_file)) + os.remove(plot_file) diff --git a/test/tcmpr_plots/Data/TCDiag/tc_pairs_reformatted_tc_diag.tcst b/test/tcmpr_plots/Data/TCDiag/tc_pairs_reformatted_tc_diag.tcst new file mode 100644 index 00000000..5c87492c --- /dev/null +++ b/test/tcmpr_plots/Data/TCDiag/tc_pairs_reformatted_tc_diag.tcst @@ -0,0 +1,169 @@ +VERSION AMODEL BMODEL DESCR STORM_ID BASIN CYCLONE STORM_NAME INIT LEAD VALID INIT_MASK VALID_MASK TOTAL INDEX_PAIR LEVEL WATCH_WARN INITIALS ALAT ALON BLAT BLON TK_ERR X_ERR Y_ERR ALTK_ERR CRTK_ERR ADLAND BDLAND AMSLP BMSLP AMAX_WIND BMAX_WIND AAL_WIND_34 BAL_WIND_34 ANE_WIND_34 BNE_WIND_34 ASE_WIND_34 BSE_WIND_34 ASW_WIND_34 BSW_WIND_34 ANW_WIND_34 BNW_WIND_34 AAL_WIND_50 BAL_WIND_50 ANE_WIND_50 BNE_WIND_50 ASE_WIND_50 BSE_WIND_50 ASW_WIND_50 BSW_WIND_50 ANW_WIND_50 BNW_WIND_50 AAL_WIND_64 BAL_WIND_64 ANE_WIND_64 BNE_WIND_64 ASE_WIND_64 BSE_WIND_64 ASW_WIND_64 BSW_WIND_64 ANW_WIND_64 BNW_WIND_64 ARADP BRADP ARRP BRRP AMRD BMRD AGUSTS BGUSTS AEYE BEYE ADIR BDIR ASPEED BSPEED ADEPTH BDEPTH NUM_MEMBERS TRACK_SPREAD TRACK_STDEV 
MSLP_STDEV MAX_WIND_STDEV LINE_TYPE INDEX_PAIRS DIAG_SOURCE TRACK_SOURCE FIELD_SOURCE N_DIAG SHEAR_MAGNITUDE STORM_SPEED TPW DIST_TO_LAND PW01 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 000000 2022-09-26 00:00:00 NA NA 68 27 TS NA NA 16.8 -80.8 16.9 -80.9 8.30509 5.74231 -6.00002 -8.30358 0.0098815 170.37323 170.37323 999 991 36 50 53.5 50 59 60 NA 60 NA NA 48 30 NA 30 NA 30 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 1007 376 120 47 30 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 27 CIRA_DIAG_RT GFS GFS_0p50 4 1 15 63 307 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 060000 2022-09-26 06:00:00 NA NA 68 28 HU NA NA 17.8 -81.9 17.7 -81.7 12.90818 -11.42901 5.99991 12.22981 -4.12226 182.68242 179.12946 992 985 47 65 55.5 60 55 70 57 70 37 30 73 70 NA 26.66667 NA 30 NA 30 NA NA NA 20 NA 15 NA 15 NA NA NA NA NA NA 1008 1007 245 150 42 15 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 28 CIRA_DIAG_RT GFS GFS_0p50 4 2 12 63 331 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 120000 2022-09-26 12:00:00 NA NA 68 29 HU NA NA 18.6 -82.6 18.7 -82.4 12.85575 -11.3697 -6.00002 1.29942 -12.78759 189.05301 189.05301 989 981 53 70 63.75 75 86 90 62 80 54 40 53 90 48 36.66667 NA 40 48 40 NA NA NA 30 NA 20 NA 20 NA NA NA NA NA NA 1001 1008 47 150 40 15 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 29 CIRA_DIAG_RT GFS GFS_0p50 4 4 11 65 354 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 180000 2022-09-26 18:00:00 NA NA 68 30 HU NA NA 19.7 -83.2 19.7 -83 11.29747 -11.29747 0 5.56856 -9.82742 153.56557 153.56557 985 976 65 80 88 85 85 100 86 90 74 60 107 90 44.66667 37.5 51 50 49 50 NA 20 34 30 32 25 32 30 NA 25 NA NA NA 20 1001 1008 55 150 32 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 CIRA_DIAG_RT GFS GFS_0p50 4 3 13 64 255 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 240000 2022-09-27 00:00:00 NA NA 68 31 HU NA NA 20.9 -83.7 20.8 -83.3 23.21673 -22.42803 6.00002 11.37047 -20.23697 61.91212 61.91212 980 965 66 85 89.75 85 99 100 99 90 72 60 89 90 41 37.5 59 50 42 50 24 20 39 30 28 25 NA 30 28 25 NA NA NA 20 1008 1006 363 130 27 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 CIRA_DIAG_RT GFS GFS_0p50 4 0 11 65 113 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 300000 2022-09-27 06:00:00 NA NA 68 32 HU NA NA 21.7 -84 21.8 -83.6 23.08485 -22.29151 -5.99991 0.22225 -23.07963 21.17544 12.00915 972 956 69 100 92.5 90 95 100 103 100 65 70 107 90 44 42.5 48 50 49 50 35 30 NA 40 36 23.75 36 30 36 25 NA 20 NA 20 1008 1008 205 200 32 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 CIRA_DIAG_RT GFS GFS_0p50 4 3 9 66 23 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 360000 2022-09-27 12:00:00 NA NA 68 34 HU NA NA 22.6 -84.3 22.6 -83.6 38.77508 -38.77508 0 0.00023739 -38.7681 12.60954 -16.52652 967 963 71 100 109.75 115 104 120 118 120 95 100 122 120 50.25 57.5 58 60 38 60 47 50 58 60 31 23.75 30 30 NA 25 NA 20 32 20 1007 1009 158 270 30 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 CIRA_DIAG_RT GFS GFS_0p50 4 9 9 66 18 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 420000 2022-09-27 18:00:00 NA NA 68 35 HU NA NA 23.5 -84.5 23.5 -83.3 66.02816 -66.02816 0 -19.35787 -63.11434 60.64657 41.01329 962 951 80 105 113.5 115 115 120 123 120 100 100 116 120 58.25 57.5 62 60 55 60 51 50 65 60 41 27.5 43 35 39 35 43 20 39 20 982 1009 32 270 25 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 CIRA_DIAG_RT GFS GFS_0p50 4 13 9 67 112 +V11.1.0 GFSO BEST NA 
AL092022 AL 09 IAN 2022-09-26 00:00:00 480000 2022-09-28 00:00:00 NA NA 68 36 HU NA NA 24.4 -84.5 24.4 -83 81.96153 -81.96153 0 -23.88008 -78.39014 115.46132 93.44619 955 947 89 105 131.5 115 150 120 125 120 110 100 141 120 67.25 57.5 68 60 60 60 59 50 82 60 45.5 27.5 44 35 41 35 48 20 49 20 1007 1008 170 180 27 20 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 36 CIRA_DIAG_RT GFS GFS_0p50 4 19 9 66 204 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 540000 2022-09-28 06:00:00 NA NA 68 37 HU NA NA 25.2 -84.7 25.2 -82.9 97.72107 -97.72107 0 -11.01521 -97.08056 142.94229 88.74855 950 945 97 120 143 125 159 120 138 130 116 100 159 150 65.25 65 66 70 58 60 60 60 77 70 48 33.75 52 35 43 35 46 30 51 35 962 1010 20 200 23 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 CIRA_DIAG_RT GFS GFS_0p50 4 24 8 64 293 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 600000 2022-09-28 12:00:00 NA NA 68 38 HU NA NA 26 -84.8 26 -82.7 113.24838 -113.24838 0 -24.90307 -110.45547 147.03418 36.89529 944 937 95 135 157.25 127.5 157 120 130 140 149 100 193 150 73 70 78 70 58 60 69 70 87 80 47.75 37.5 47 40 43 40 44 30 57 40 964 1010 20 270 27 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 CIRA_DIAG_RT GFS GFS_0p50 4 23 7 64 254 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 660000 2022-09-28 18:00:00 NA NA 68 39 HU NA NA 26.6 -84.7 26.6 -82.4 123.39304 -123.39304 0 -50.46132 -112.57892 111.27605 22.9944 943 938 96 135 162.75 132.5 179 130 114 150 150 100 208 150 84.5 65 103 50 64 60 71 70 100 80 51.75 36.25 51 30 44 40 50 30 62 45 967 1010 30 270 28 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 CIRA_DIAG_RT GFS GFS_0p50 4 27 5 64 222 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 720000 2022-09-29 00:00:00 NA NA 68 40 HU NA NA 27 -84.7 27.2 -81.7 160.68701 -160.2383 -12.00005 -123.82127 -102.3685 102.49017 -36.81872 944 960 98 100 215.5 160 373 180 94 120 179 120 216 220 84.5 57.5 111 50 59 60 73 50 95 70 56.5 36.25 63 30 45 40 53 30 65 45 968 1010 31 250 22 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 CIRA_DIAG_RT GFS GFS_0p50 4 36 3 61 207 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 780000 2022-09-29 06:00:00 NA NA 68 41 TS NA NA 27.2 -84.6 27.7 -81.1 188.7561 -186.35682 -30 -156.3563 -105.68227 102.49017 -34.57149 952 986 99 60 212.5 207.5 372 360 110 120 170 150 198 200 76.25 55 96 60 56 50 65 40 88 70 54 NA 63 NA 45 NA 47 NA 61 NA 968 1010 33 240 26 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 CIRA_DIAG_RT GFS GFS_0p50 4 32 3 60 190 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 840000 2022-09-29 12:00:00 NA NA 68 42 TS NA NA 27.5 -84.6 28.4 -80.6 218.77477 -212.00567 -53.99998 -158.70825 -150.52196 92.12616 5.2729 956 987 91 60 229.25 207.5 480 360 109 120 142 150 186 200 72.25 55 85 60 60 50 63 40 81 70 51 NA 58 NA 43 NA 44 NA 59 NA 972 1009 31 240 28 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 CIRA_DIAG_RT GFS GFS_0p50 4 28 3 59 180 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 900000 2022-09-29 18:00:00 NA NA 68 43 HU NA NA 27.8 -84.5 28.9 -80.1 241.52927 -232.33679 -66.00002 -202.81837 -131.07282 90.93851 43.03906 961 986 84 65 213.75 212.5 451 360 99 140 130 150 175 200 68.25 86.66667 73 120 60 NA 64 40 76 100 45 40 42 NA 46 NA 44 NA 48 40 977 1008 30 260 30 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 CIRA_DIAG_RT GFS GFS_0p50 4 29 4 60 167 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 960000 2022-09-30 00:00:00 NA NA 68 
44 HU NA NA 28.2 -84.6 29.6 -79.4 285.76925 -273.14477 -83.99998 -242.82529 -150.56737 90.93851 81.60885 965 986 81 70 221 227.5 471 420 88 150 155 120 170 220 68 85 81 60 53 80 63 80 75 120 42 50 39 NA 39 NA 40 40 50 60 981 1008 30 260 24 40 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 44 CIRA_DIAG_RT GFS GFS_0p50 4 37 4 59 178 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1020000 2022-09-30 06:00:00 NA NA 68 45 HU NA NA 28.5 -84.7 30.3 -79.1 312.01528 -292.72776 -107.99995 -203.11068 -236.7795 79.56824 122.45998 968 984 74 75 114.5 202.5 87 420 88 130 136 100 147 160 58 85 60 80 52 60 58 80 62 120 36.25 50 34 NA 31 NA 40 40 40 60 980 1007 29 180 31 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 45 CIRA_DIAG_RT GFS GFS_0p50 4 36 6 58 144 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1080000 2022-09-30 12:00:00 NA NA 68 46 HU NA NA 29.3 -84.6 31.5 -79 318.45042 -289.80452 -132.00005 -152.3043 -279.60248 23.70624 90.4944 970 980 70 75 92 157.5 59 240 75 130 114 120 120 140 44 75 48 80 48 60 43 80 37 80 31 46.66667 23 40 37 NA 33 40 NA 60 1010 1008 726 210 28 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 CIRA_DIAG_RT GFS GFS_0p50 4 33 8 58 59 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1140000 2022-09-30 18:00:00 NA NA 68 47 HU NA NA 30 -84.7 33.3 -79.2 343.68503 -280.91889 -197.99995 -170.86312 -298.13196 -0.96496 6.17339 981 977 45 75 73.5 120 66 190 68 130 81 100 79 60 NA 65 NA 80 NA 60 NA 80 NA 40 NA 35 NA NA NA 30 NA 40 NA NA 1005 1007 129 210 28 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 CIRA_DIAG_RT GFS GFS_0p50 4 36 6 55 -13 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1200000 2022-10-01 00:00:00 NA NA 68 48 EX NA NA 30.4 -84.5 34.4 -79.3 356.36428 -263.43016 -240.00011 -219.44396 -280.70289 -26.14085 -54.04712 991 990 35 50 77 165 NA 160 NA 170 77 NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 1007 205 210 66 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 CIRA_DIAG_RT GFS GFS_0p50 4 44 4 51 -38 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1260000 2022-10-01 06:00:00 NA NA 68 49 EX NA NA 30.8 -84.4 35.3 -79.7 358.84725 -236.37121 -270 -172.62924 -314.52207 -54.97992 -109.86324 997 1005 29 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1009 1010 354 400 96 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 CIRA_DIAG_RT GFS GFS_0p50 4 44 4 48 -78 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 000000 2022-09-26 00:00:00 NA NA 53 27 TS NA RJP 16.9 -80.9 16.9 -80.9 0 0 0 0 0 170.37323 170.37323 NA 991 50 50 50 50 60 60 60 60 NA NA 30 30 30 30 30 30 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 NA 120 NA 30 60 60 NA NA 325 0 15 0 NA D NA NA NA NA NA TCDIAG 27 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 2 311 63.9 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 120000 2022-09-26 12:00:00 NA NA 53 30 HU NA RJP 18.7 -82.3 18.7 -82.4 5.68317 5.68317 0 -3.14642 4.73148 189.05301 189.05301 NA 981 65 70 70 75 90 90 90 80 30 40 70 90 35 36.66667 40 40 30 40 NA NA NA 30 20 20 20 20 NA NA NA NA NA NA NA 1008 NA 150 NA 15 80 85 NA NA 330 0 11 0 NA D NA NA NA NA NA TCDIAG 30 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 4.9 369 65.7 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 240000 2022-09-27 00:00:00 NA NA 53 32 HU NA RJP 20.8 -83.5 20.8 -83.3 11.21774 -11.21774 0 2.78031 -10.86564 61.91212 61.91212 NA 965 85 85 92.5 85 120 100 100 90 60 60 90 90 30 37.5 40 50 30 50 20 20 30 
30 13.33333 25 20 30 10 25 NA NA 10 20 NA 1006 NA 130 NA 20 105 105 NA NA 330 0 12 0 NA D NA NA NA NA NA TCDIAG 32 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 8.8 147 66.4 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 360000 2022-09-27 12:00:00 NA NA 53 35 HU NA RJP 22.7 -84 22.6 -83.6 22.94737 -22.14907 6.00002 5.99908 -22.14505 -4.86904 -16.52652 NA 963 100 100 100 115 130 120 100 120 80 100 90 120 45 57.5 50 60 50 60 40 50 40 60 22.5 23.75 30 30 20 25 20 20 20 20 NA 1009 NA 270 NA 15 120 135 NA NA 345 0 10 0 NA D NA NA NA NA NA TCDIAG 35 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 16 3 67.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 480000 2022-09-28 00:00:00 NA NA 53 37 HU NA RJP 24.7 -84.1 24.4 -83 62.6739 -60.03345 18.00007 -0.27544 -62.66201 105.63657 93.44619 NA 947 105 105 120 115 150 120 130 120 90 100 110 120 57.5 57.5 70 60 60 60 50 50 50 60 32.5 27.5 35 35 35 35 30 20 30 20 NA 1008 NA 180 NA 20 130 130 NA NA 355 0 10 0 NA D NA NA NA NA NA TCDIAG 37 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 24.4 208 65.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 600000 2022-09-28 12:00:00 NA NA 53 39 HU NA RJP 26.2 -83.8 26 -82.7 60.47273 -59.27015 12.00005 -1.32927 -60.44724 100.43401 36.89529 NA 937 115 135 135 127.5 180 120 130 140 100 100 130 150 70 70 80 70 70 60 60 70 70 80 NA 37.5 NA 40 NA 40 NA 30 NA 40 NA 1010 NA 270 NA 20 140 165 NA NA 10 0 8 0 NA D NA NA NA NA NA TCDIAG 39 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.4 241 63.6 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 720000 2022-09-29 00:00:00 NA NA 53 41 HU NA RJP 27.6 -83.5 27.2 -81.7 98.84221 -95.88422 23.99998 -52.48978 -83.73223 41.32911 -36.81872 NA 960 105 100 155 160 200 180 150 120 120 120 150 220 75 57.5 90 50 70 60 60 50 80 70 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 250 NA 20 130 120 NA NA 10 0 7 0 NA D NA NA NA NA NA TCDIAG 41 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.6 188 56.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 960000 2022-09-30 00:00:00 NA NA 53 45 HU NA RJP 29 -83.2 29.6 -79.4 202.06432 -198.83155 -36.00002 -157.81612 -126.1323 7.92187 81.60885 NA 986 80 70 NA 227.5 NA 420 NA 150 NA 120 NA 220 NA 85 NA 60 NA 80 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1008 NA 260 NA 40 100 85 NA NA 10 0 4 0 NA D NA NA NA NA NA TCDIAG 45 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 41.5 58 57.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1200000 2022-10-01 00:00:00 NA NA 53 49 EX NA RJP 32 -82.5 34.4 -79.3 215.74802 -160.65859 -144.00009 -131.47063 -171.01439 -70.34722 -54.04712 NA 990 40 50 NA 165 NA 160 NA 170 NA NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 NA 210 NA 80 50 60 NA NA 10 0 8 0 NA D NA NA NA NA NA TCDIAG 49 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 42.4 -181 53.8 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 000000 2022-09-26 00:00:00 NA NA 54 27 TS NA NA 16.9 -80.8 16.9 -80.9 5.74079 5.74079 0 -3.96366 4.15142 170.37323 170.37323 NA 991 50 50 NA 50 NA 60 NA 60 NA NA NA 30 NA 30 NA 30 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 NA 120 NA 30 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 27 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 2 311 63.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 120000 2022-09-26 12:00:00 NA NA 54 29 HU NA NA 18.7 -82.5 18.7 -82.4 5.68317 -5.68317 0 3.14642 -4.73148 189.05301 189.05301 NA 981 66 70 NA 75 NA 90 NA 80 NA 40 NA 90 NA 36.66667 NA 40 NA 40 NA NA NA 30 NA 20 NA 20 NA NA NA NA NA NA NA 1008 NA 150 NA 15 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA 
TCDIAG 29 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 4.9 369 65.7 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 240000 2022-09-27 00:00:00 NA NA 54 31 HU NA NA 20.6 -83.6 20.8 -83.3 20.67626 -16.83774 -11.99993 -7.45006 -19.28342 87.28262 61.91212 NA 965 88 85 NA 85 NA 100 NA 90 NA 60 NA 90 NA 37.5 NA 50 NA 50 NA 20 NA 30 NA 25 NA 30 NA 25 NA NA NA 20 NA 1006 NA 130 NA 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 8.8 147 66.4 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 360000 2022-09-27 12:00:00 NA NA 54 34 HU NA NA 22.5 -84.1 22.6 -83.6 28.34859 -27.70636 -6.00002 -5.99877 -27.7014 -4.86904 -16.52652 NA 963 104 100 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 23.75 NA 30 NA 25 NA 20 NA 20 NA 1009 NA 270 NA 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 16 3 67.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 480000 2022-09-28 00:00:00 NA NA 54 36 HU NA NA 24.4 -84.6 24.4 -83 87.42555 -87.42555 0 -25.47206 -83.61607 115.46132 93.44619 NA 947 117 105 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 27.5 NA 35 NA 35 NA 20 NA 20 NA 1008 NA 180 NA 20 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 36 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 24.4 208 65.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 600000 2022-09-28 12:00:00 NA NA 54 38 HU NA NA 26.1 -84.6 26 -82.7 102.59455 -102.41895 6.00002 -16.66965 -101.21253 123.01351 36.89529 NA 937 116 135 NA 127.5 NA 120 NA 140 NA 100 NA 150 NA 70 NA 70 NA 60 NA 70 NA 80 NA 37.5 NA 40 NA 40 NA 30 NA 40 NA 1010 NA 270 NA 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.4 241 63.6 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 720000 2022-09-29 00:00:00 NA NA 54 40 HU NA NA 27.2 -84.6 27.2 -81.7 154.75853 -154.75853 0 -111.55713 -107.22211 102.49017 -36.81872 NA 960 112 100 NA 160 NA 180 NA 120 NA 120 NA 220 NA 57.5 NA 50 NA 60 NA 50 NA 70 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 250 NA 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.6 188 56.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 840000 2022-09-29 12:00:00 NA NA 54 42 TS NA NA 28.2 -84.3 28.4 -80.6 195.83421 -195.46621 -11.99993 -114.36647 -158.9262 90.93851 5.2729 NA 987 99 60 NA 207.5 NA 360 NA 120 NA 150 NA 200 NA 55 NA 60 NA 50 NA 40 NA 70 NA NA NA NA NA NA NA NA NA NA NA 1009 NA 240 NA 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 37.6 156 58.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 960000 2022-09-30 00:00:00 NA NA 54 44 HU NA NA 29.4 -84 29.6 -79.4 240.51764 -240.21809 -12.00005 -166.9389 -173.08755 25.63065 81.60885 NA 986 87 70 NA 227.5 NA 420 NA 150 NA 120 NA 220 NA 85 NA 60 NA 80 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1008 NA 260 NA 40 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 44 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 41.5 58 57.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 00:00:00 1080000 2022-09-30 12:00:00 NA NA 54 46 HU NA NA 30.7 -83.5 31.5 -79 236.1224 -231.19211 -47.99995 -64.35391 -227.13933 -36.46659 90.4944 NA 980 75 75 NA 157.5 NA 240 NA 130 NA 120 NA 140 NA 75 NA 80 NA 60 NA 80 NA 80 NA 46.66667 NA 40 NA NA NA 40 NA 60 NA 1008 NA 210 NA 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38 -88 55.8 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 
2022-09-26 00:00:00 1200000 2022-10-01 00:00:00 NA NA 54 48 EX NA NA 32.3 -83 34.4 -79.3 224.19859 -185.44265 -126.00014 -111.65919 -194.36853 -105.20315 -54.04712 NA 990 65 50 NA 165 NA 160 NA 170 NA NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 NA 210 NA 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 42.4 -181 53.8 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 000000 2022-09-26 06:00:00 NA NA 69 28 HU NA NA 17.8 -81.8 17.7 -81.7 8.28595 -5.71472 5.99991 8.15906 -1.43597 182.68242 179.12946 993 985 47 65 51.25 60 53 70 52 70 38 30 62 70 NA 26.66667 NA 30 NA 30 NA NA NA 20 NA 15 NA 15 NA NA NA NA NA NA 1009 1007 319 150 38 15 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 28 CIRA_DIAG_RT GFS GFS_0p50 4 3 12 63 338 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 060000 2022-09-26 12:00:00 NA NA 69 29 HU NA NA 18.8 -82.5 18.7 -82.4 8.26307 -5.68149 5.99991 8.14066 -1.40831 190.11078 189.05301 990 981 46 70 74 75 90 90 72 80 45 40 89 90 NA 36.66667 NA 40 NA 40 NA NA NA 30 NA 20 NA 20 NA NA NA NA NA NA 1006 1008 103 150 27 15 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 29 CIRA_DIAG_RT GFS GFS_0p50 4 2 12 64 374 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 120000 2022-09-26 18:00:00 NA NA 69 30 HU NA NA 19.9 -83.2 19.7 -83 16.47639 -11.2904 11.99993 16.00354 -3.90646 127.23096 153.56557 988 976 62 80 79.5 85 86 100 92 90 71 60 69 90 43 37.5 45 50 41 50 NA 20 NA 30 NA 25 NA 30 NA 25 NA NA NA 20 1008 1008 264 150 40 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 CIRA_DIAG_RT GFS GFS_0p50 4 2 12 64 235 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 180000 2022-09-27 00:00:00 NA NA 69 31 HU NA NA 21 -83.5 20.8 -83.3 16.42168 -11.21028 12.00005 14.40186 -7.88422 61.91212 61.91212 980 965 60 85 77.5 85 86 100 86 90 72 60 66 90 46.66667 37.5 46 50 57 50 NA 20 37 30 NA 25 NA 30 NA 25 NA NA NA 20 1004 1006 86 130 22 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 CIRA_DIAG_RT GFS GFS_0p50 4 3 11 64 115 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 240000 2022-09-27 06:00:00 NA NA 69 32 HU NA NA 22 -83.8 21.8 -83.6 16.3699 -11.13429 12.00005 14.55173 -7.4916 5.05424 12.00915 976 956 62 100 86.75 90 93 100 89 100 74 70 91 90 41.66667 42.5 48 50 36 50 NA 30 41 40 NA 23.75 NA 30 NA 25 NA 20 NA 20 1008 1008 203 200 31 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 CIRA_DIAG_RT GFS GFS_0p50 4 9 10 66 22 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 300000 2022-09-27 12:00:00 NA NA 69 34 HU NA NA 22.9 -84 22.6 -83.6 28.5283 -22.13291 17.99995 17.99685 -22.12881 22.00319 -16.52652 975 963 64 100 118.75 115 156 120 110 120 86 100 123 120 54.33333 57.5 55 60 NA 60 51 50 57 60 32 23.75 32 30 NA 25 NA 20 NA 20 1007 1009 168 270 32 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 CIRA_DIAG_RT GFS GFS_0p50 4 12 10 66 31 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 360000 2022-09-27 18:00:00 NA NA 69 35 HU NA NA 23.9 -84 23.5 -83.3 45.332 -38.45766 23.99998 11.666 -43.79675 77.08438 41.01329 968 951 73 105 116 115 137 120 113 120 86 100 128 120 61 57.5 62 60 61 60 56 50 65 60 41 27.5 45 35 44 35 40 20 35 20 1008 1009 162 270 37 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 CIRA_DIAG_RT GFS GFS_0p50 4 16 9 66 132 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 420000 2022-09-28 00:00:00 NA NA 69 36 HU NA NA 24.7 -84 24.4 -83 57.46769 -54.57594 18.00007 1.31464 -57.4423 105.63657 
93.44619 958 947 91 105 134.5 115 147 120 138 120 115 100 138 120 63 57.5 60 60 58 60 61 50 73 60 44.75 27.5 45 35 38 35 45 20 51 20 970 1008 20 180 30 20 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 36 CIRA_DIAG_RT GFS GFS_0p50 4 19 8 64 217 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 480000 2022-09-28 06:00:00 NA NA 69 37 HU NA NA 25.5 -84 25.2 -82.9 62.30162 -59.64473 17.99995 11.15877 -61.28275 120.89156 88.74855 955 945 89 120 155.75 125 149 120 160 130 124 100 190 150 68 65 68 70 66 60 65 60 73 70 45 33.75 49 35 40 35 46 30 45 35 979 1010 31 200 33 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 CIRA_DIAG_RT GFS GFS_0p50 4 24 8 63 224 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 540000 2022-09-28 12:00:00 NA NA 69 38 HU NA NA 26.2 -83.8 26 -82.7 60.47273 -59.27015 12.00005 -1.32927 -60.44724 100.43401 36.89529 950 937 91 135 152.75 127.5 154 120 114 140 160 100 183 150 69.75 70 77 70 55 60 67 70 80 80 47.25 37.5 48 40 44 40 44 30 53 40 970 1010 28 270 31 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 CIRA_DIAG_RT GFS GFS_0p50 4 25 7 63 157 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 600000 2022-09-28 18:00:00 NA NA 69 39 HU NA NA 26.9 -83.8 26.6 -82.4 77.13978 -75.01031 17.99995 -14.25286 -75.79747 78.18031 22.9944 952 938 95 135 150.5 132.5 159 130 103 150 146 100 194 150 71.5 65 85 50 56 60 66 70 79 80 46.5 36.25 43 30 43 40 44 30 56 45 972 1010 23 270 27 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 CIRA_DIAG_RT GFS GFS_0p50 4 29 5 63 128 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 660000 2022-09-29 00:00:00 NA NA 69 40 HU NA NA 27.1 -83.6 27.2 -81.7 101.61628 -101.43898 -6.00002 -77.27895 -65.95537 52.14397 -36.81872 950 960 102 100 201.75 160 295 180 125 120 182 120 205 220 77.25 57.5 93 50 57 60 75 50 84 70 48 36.25 42 30 42 40 50 30 58 45 978 1010 33 250 27 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 CIRA_DIAG_RT GFS GFS_0p50 4 35 1 61 104 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 720000 2022-09-29 06:00:00 NA NA 69 41 TS NA NA 27.1 -83.7 27.7 -81.1 143.10139 -138.49912 -36.00002 -125.58246 -68.55422 52.14397 -34.57149 959 986 88 60 182.75 207.5 298 360 105 120 139 150 189 200 66.25 55 71 60 56 50 58 40 80 70 43.25 NA 37 NA 38 NA 45 NA 53 NA 987 1010 31 240 28 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 CIRA_DIAG_RT GFS GFS_0p50 4 36 1 59 113 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 780000 2022-09-29 12:00:00 NA NA 69 42 TS NA NA 27.2 -83.7 28.4 -80.6 179.59611 -164.53198 -71.99993 -148.6209 -100.77191 52.14397 5.2729 964 987 86 60 198.75 207.5 413 360 106 120 119 150 157 200 59.5 55 66 60 51 50 47 40 74 70 35 NA 27 NA 27 NA 36 NA 50 NA 984 1009 35 240 22 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 CIRA_DIAG_RT GFS GFS_0p50 4 32 2 59 111 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 840000 2022-09-29 18:00:00 NA NA 69 43 HU NA NA 27.4 -83.6 28.9 -80.1 205.87452 -185.16025 -90 -189.74233 -79.79287 41.32911 43.03906 966 986 84 65 201 212.5 433 360 101 140 122 150 148 200 60 86.66667 73 120 50 NA 51 40 66 100 39.33333 40 37 NA NA NA 38 NA 43 40 986 1008 35 260 23 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 CIRA_DIAG_RT GFS GFS_0p50 4 29 2 59 90 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 900000 2022-09-30 00:00:00 NA NA 69 44 HU NA NA 27.6 -83.6 29.6 -79.4 251.69872 -221.25155 -120 -235.83688 -87.80883 41.32911 81.60885 967 986 83 70 209 227.5 414 420 87 150 143 120 
192 220 61.25 85 58 60 49 80 63 80 75 120 36.25 50 26 NA 36 NA 36 40 47 60 979 1008 20 260 21 40 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 44 CIRA_DIAG_RT GFS GFS_0p50 4 33 3 59 82 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 960000 2022-09-30 06:00:00 NA NA 69 45 HU NA NA 28 -83.7 30.3 -79.1 277.75189 -241.04383 -137.99995 -213.24056 -177.89547 38.81667 122.45998 976 984 62 75 102.25 202.5 77 420 86 130 113 100 133 160 50.75 85 41 80 51 60 46 80 65 120 NA 50 NA NA NA NA NA 40 NA 60 1008 1007 191 180 33 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 45 CIRA_DIAG_RT GFS GFS_0p50 4 33 6 60 91 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1020000 2022-09-30 12:00:00 NA NA 69 46 HU NA NA 28.8 -83.9 31.5 -79 301.45445 -254.22583 -162.00005 -179.68565 -241.98164 47.35473 90.4944 974 980 68 75 91.75 157.5 61 240 72 130 98 120 136 140 43.75 75 36 80 39 60 46 80 54 80 30 46.66667 NA 40 NA NA 27 40 33 60 1010 1008 697 210 20 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 CIRA_DIAG_RT GFS GFS_0p50 4 29 7 58 95 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1080000 2022-09-30 18:00:00 NA NA 69 47 HU NA NA 29.4 -84 33.3 -79.2 339.4837 -245.95364 -233.99998 -209.96478 -266.68797 25.63065 6.17339 979 977 57 75 81.75 120 31 190 80 130 106 100 110 60 30.33333 65 27 80 NA 60 33 80 31 40 NA 35 NA NA NA 30 NA 40 NA NA 1007 1007 171 210 17 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 CIRA_DIAG_RT GFS GFS_0p50 4 34 6 56 56 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1140000 2022-10-01 00:00:00 NA NA 69 48 EX NA NA 29.9 -83.8 34.4 -79.3 353.77526 -228.59762 -270.00011 -251.9757 -248.23398 1.44836 -54.04712 984 990 43 50 57 165 NA 160 54 170 95 NA 22 NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1008 1007 245 210 23 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 CIRA_DIAG_RT GFS GFS_0p50 4 39 5 54 2 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1200000 2022-10-01 06:00:00 NA NA 69 49 EX NA NA 30.4 -83.7 35.3 -79.7 356.49348 -201.62245 -293.99998 -207.07694 -290.10511 -36.46659 -109.86324 992 1005 29 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1008 1010 278 400 106 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 CIRA_DIAG_RT GFS GFS_0p50 4 39 7 51 -48 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1260000 2022-10-01 12:00:00 NA NA 69 50 EX NA NA 31.2 -83.3 35.8 -79.9 324.21337 -170.11278 -275.99991 -209.76551 -247.13336 -63.46339 -146.50467 997 1005 27 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1009 1010 272 405 115 400 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 50 CIRA_DIAG_RT GFS GFS_0p50 4 37 9 49 -145 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 000000 2022-09-26 06:00:00 NA NA 53 28 HU NA DPB 17.7 -81.7 17.7 -81.7 0 0 0 0 0 179.12946 179.12946 NA 985 65 65 60 60 70 70 70 70 30 30 70 70 26.66667 26.66667 30 30 30 30 NA NA 20 20 15 15 15 15 NA NA NA NA NA NA NA 1007 NA 150 NA 15 80 80 NA NA 315 0 11 0 NA D NA NA NA NA NA TCDIAG 28 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 3.4 340 64 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 120000 2022-09-26 18:00:00 NA NA 53 31 HU NA DPB 19.7 -83 19.7 -83 0 0 0 0 0 153.56557 153.56557 NA 976 90 80 87.5 85 100 100 100 90 60 60 90 90 42.5 37.5 50 50 50 50 30 20 40 30 18.75 25 20 30 20 25 15 NA 20 20 NA 1008 NA 150 NA 20 110 100 NA NA 330 0 12 0 NA D NA NA NA NA NA TCDIAG 31 SHIPS_DIAG_RT SHIPS_TRK 
GFS_0p50 3 6.6 230 65.8 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 240000 2022-09-27 06:00:00 NA NA 53 33 HU NA DPB 21.7 -83.9 21.8 -83.6 17.76275 -16.71874 -5.99991 -1.27763 -17.71353 21.17544 12.00915 NA 956 105 100 100 90 120 100 110 100 70 70 100 90 45 42.5 50 50 50 50 30 30 50 40 21.25 23.75 25 30 25 25 15 20 20 20 NA 1008 NA 200 NA 15 130 120 NA NA 335 0 11 0 NA D NA NA NA NA NA TCDIAG 33 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 12 19 67.9 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 360000 2022-09-27 18:00:00 NA NA 53 36 HU NA DPB 23.6 -84.1 23.5 -83.3 44.4091 -44.00191 6.00002 -7.16506 -43.81918 49.50692 41.01329 NA 951 115 105 110 115 130 120 110 120 90 100 110 120 52.5 57.5 60 60 60 60 40 50 50 60 26.25 27.5 30 35 30 35 20 20 25 20 NA 1009 NA 270 NA 15 140 130 NA NA 355 0 10 0 NA D NA NA NA NA NA TCDIAG 36 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 19.4 141 68.1 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 480000 2022-09-28 06:00:00 NA NA 53 38 HU NA DPB 25.3 -84.1 25.2 -82.9 65.39642 -65.1206 5.99991 -1.37987 -65.37008 120.89156 88.74855 NA 945 120 120 127.5 125 150 120 130 130 100 100 130 150 60 65 70 70 60 60 50 60 60 70 32.5 33.75 35 35 35 35 30 30 30 35 NA 1010 NA 200 NA 20 145 130 NA 25 360 0 8 0 NA D NA NA NA NA NA TCDIAG 38 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 27.6 236 66.5 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 600000 2022-09-28 18:00:00 NA NA 53 40 HU NA DPB 26.7 -83.7 26.6 -82.4 69.97101 -69.71328 6.00002 -23.03491 -66.05732 61.40549 22.9944 NA 938 115 135 142.5 132.5 180 130 130 150 110 100 150 150 70 65 80 50 70 60 60 70 70 80 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 270 NA 20 140 165 NA NA 15 0 7 0 NA D NA NA NA NA NA TCDIAG 40 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.2 116 66.7 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 720000 2022-09-29 06:00:00 NA NA 53 42 TS NA DPB 27.7 -83.4 27.7 -81.1 122.18448 -122.18448 0 -89.05243 -83.62616 41.32911 -34.57149 NA 986 100 60 157.5 207.5 210 360 150 120 120 150 150 200 75 55 90 60 70 50 60 40 80 70 NA NA NA NA NA NA NA NA NA NA NA 1010 NA 240 NA 20 120 75 NA NA 15 0 5 0 NA D NA NA NA NA NA TCDIAG 42 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.1 82 59.9 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 960000 2022-09-30 06:00:00 NA NA 53 46 HU NA DPB 29.2 -83 30.3 -79.1 213.61041 -203.1586 -65.99991 -132.57011 -167.44591 7.92187 122.45998 NA 984 80 75 NA 202.5 NA 420 NA 130 NA 100 NA 160 NA 85 NA 80 NA 60 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1007 NA 180 NA 40 100 90 NA NA 15 0 4 0 NA D NA NA NA NA NA TCDIAG 46 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.6 -3 55.6 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1200000 2022-10-01 06:00:00 NA NA 53 50 EX NA DPB 32 -82.9 35.3 -79.7 254.4584 -159.82833 -197.99995 -131.22339 -217.95911 -93.88587 -109.86324 NA 1005 35 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 NA 400 NA 200 45 40 NA NA 0 0 7 0 NA D NA NA NA NA NA TCDIAG 50 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 47.4 -187 51.4 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 000000 2022-09-26 06:00:00 NA NA 55 28 HU NA NA 17.7 -81.6 17.7 -81.7 5.71588 5.71588 0 -3.16453 4.75871 179.12946 179.12946 NA 985 65 65 NA 60 NA 70 NA 70 NA 30 NA 70 NA 26.66667 NA 30 NA 30 NA NA NA 20 NA 15 NA 15 NA NA NA NA NA NA NA 1007 NA 150 NA 15 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 28 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 3.4 340 64 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 
06:00:00 120000 2022-09-26 18:00:00 NA NA 55 30 HU NA NA 20 -83.1 19.7 -83 18.8639 -5.64342 17.99995 18.43941 3.96315 127.23096 153.56557 NA 976 88 80 NA 85 NA 100 NA 90 NA 60 NA 90 NA 37.5 NA 50 NA 50 NA 20 NA 30 NA 25 NA 30 NA 25 NA NA NA 20 NA 1008 NA 150 NA 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 6.6 230 65.8 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 240000 2022-09-27 06:00:00 NA NA 55 32 HU NA NA 21.8 -84 21.8 -83.6 22.28374 -22.28374 0 5.99755 -21.45731 5.05424 12.00915 NA 956 107 100 NA 90 NA 100 NA 100 NA 70 NA 90 NA 42.5 NA 50 NA 50 NA 30 NA 40 NA 23.75 NA 30 NA 25 NA 20 NA 20 NA 1008 NA 200 NA 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 12 19 67.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 360000 2022-09-27 18:00:00 NA NA 55 35 HU NA NA 23.9 -84.1 23.5 -83.3 50.07732 -43.95155 23.99998 10.05532 -49.0482 77.08438 41.01329 NA 951 124 105 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 27.5 NA 35 NA 35 NA 20 NA 20 NA 1009 NA 270 NA 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 19.4 141 68.1 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 480000 2022-09-28 06:00:00 NA NA 55 37 HU NA NA 25.6 -84.1 25.2 -82.9 69.32674 -65.03998 23.99998 16.51131 -67.31897 120.89156 88.74855 NA 945 128 120 NA 125 NA 120 NA 130 NA 100 NA 150 NA 65 NA 70 NA 60 NA 60 NA 70 NA 33.75 NA 35 NA 35 NA 30 NA 35 NA 1010 NA 200 NA 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 27.6 236 66.5 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 600000 2022-09-28 18:00:00 NA NA 55 39 HU NA NA 27 -83.6 26.6 -82.4 68.60116 -64.26602 23.99998 -4.38482 -68.44851 52.14397 22.9944 NA 938 129 135 NA 132.5 NA 130 NA 150 NA 100 NA 150 NA 65 NA 50 NA 60 NA 70 NA 80 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 270 NA 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.2 116 66.7 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 720000 2022-09-29 06:00:00 NA NA 55 41 TS NA NA 28.4 -83.6 27.7 -81.1 138.8835 -132.38063 41.99993 -67.73793 -121.21574 45.07822 -34.57149 NA 986 118 60 NA 207.5 NA 360 NA 120 NA 150 NA 200 NA 55 NA 60 NA 50 NA 40 NA 70 NA NA NA NA NA NA NA NA NA NA NA 1010 NA 240 NA 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.1 82 59.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 840000 2022-09-29 18:00:00 NA NA 55 43 HU NA NA 29 -83.5 28.9 -80.1 178.60955 -178.50874 6.00002 -113.21303 -138.10391 27.67527 43.03906 NA 986 105 65 NA 212.5 NA 360 NA 140 NA 150 NA 200 NA 86.66667 NA 120 NA NA NA 40 NA 100 NA 40 NA NA NA NA NA NA NA 40 NA 1008 NA 260 NA 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 34.3 48 58.6 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 960000 2022-09-30 06:00:00 NA NA 55 45 HU NA NA 29.3 -83.1 30.3 -79.1 216.73434 -208.26371 -60 -128.72333 -174.31913 -11.77626 122.45998 NA 984 89 75 NA 202.5 NA 420 NA 130 NA 100 NA 160 NA 85 NA 80 NA 60 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1007 NA 180 NA 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 45 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.6 -3 55.6 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1080000 2022-09-30 18:00:00 NA NA 55 47 HU NA NA 30.8 -82.7 33.3 -79.2 232.76916 -177.99292 -150 -132.69412 
-191.19161 -51.42987 6.17339 NA 977 74 75 NA 120 NA 190 NA 130 NA 100 NA 60 NA 65 NA 80 NA 60 NA 80 NA 40 NA 35 NA NA NA 30 NA 40 NA NA NA 1007 NA 210 NA 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.7 -121 54.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 06:00:00 1200000 2022-10-01 06:00:00 NA NA 55 49 EX NA NA 33.5 -82.5 35.3 -79.7 175.72501 -138.61922 -107.99995 -53.95326 -167.20406 -115.71823 -109.86324 NA 1005 58 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 NA 400 NA 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 47.4 -187 51.4 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 000000 2022-09-26 12:00:00 NA NA 64 29 HU NA NA 18.8 -82.5 18.7 -82.4 8.26307 -5.68149 5.99991 8.01961 -1.98483 190.11078 189.05301 989 981 49 70 74 75 90 90 71 80 46 40 89 90 NA 36.66667 NA 40 NA 40 NA NA NA 30 NA 20 NA 20 NA NA NA NA NA NA 1009 1008 223 150 26 15 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 29 CIRA_DIAG_RT GFS GFS_0p50 4 3 13 64 374 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 060000 2022-09-26 18:00:00 NA NA 64 30 HU NA NA 19.9 -83.2 19.7 -83 16.47639 -11.2904 11.99993 16.00354 -3.90646 127.23096 153.56557 983 976 60 80 87.75 85 99 100 97 90 84 60 71 90 49.33333 37.5 55 50 55 50 NA 20 38 30 NA 25 NA 30 NA 25 NA NA NA 20 1003 1008 74 150 41 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 CIRA_DIAG_RT GFS GFS_0p50 4 1 12 65 235 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 120000 2022-09-27 00:00:00 NA NA 64 31 HU NA NA 21.1 -83.4 20.8 -83.3 18.85203 -5.60327 18.00007 18.82386 -0.96609 61.91212 61.91212 978 965 72 85 91.75 85 94 100 95 90 89 60 89 90 48.33333 37.5 52 50 52 50 NA 20 41 30 28.5 25 29 30 28 25 NA NA NA 20 1006 1006 160 130 25 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 CIRA_DIAG_RT GFS GFS_0p50 4 3 11 65 114 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 180000 2022-09-27 06:00:00 NA NA 64 32 HU NA NA 22 -83.8 21.8 -83.6 16.3699 -11.13429 12.00005 14.55173 -7.4916 5.05424 12.00915 971 956 73 100 97.5 90 100 100 110 100 82 70 98 90 49.75 42.5 59 50 49 50 35 30 56 40 34 23.75 NA 30 34 25 NA 20 NA 20 1007 1008 159 200 34 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 CIRA_DIAG_RT GFS GFS_0p50 4 8 10 65 22 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 240000 2022-09-27 12:00:00 NA NA 64 34 HU NA NA 23 -83.8 22.6 -83.6 26.42689 -11.06261 23.99998 23.99572 -11.06047 22.00319 -16.52652 968 963 74 100 129.5 115 170 120 118 120 95 100 135 120 47.75 57.5 60 60 31 60 44 50 56 60 34.66667 23.75 40 30 NA 25 30 20 34 20 984 1009 31 270 33 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 CIRA_DIAG_RT GFS GFS_0p50 4 13 11 66 33 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 300000 2022-09-27 18:00:00 NA NA 64 35 HU NA NA 24.1 -83.9 23.5 -83.3 48.79492 -32.93846 36.00002 24.75458 -42.03924 77.08438 41.01329 960 951 85 105 126 115 150 120 122 120 103 100 129 120 64.5 57.5 65 60 74 60 54 50 65 60 44 27.5 44 35 51 35 35 20 46 20 972 1009 30 270 24 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 CIRA_DIAG_RT GFS GFS_0p50 4 18 9 66 150 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 360000 2022-09-28 00:00:00 NA NA 64 36 HU NA NA 24.9 -83.8 24.4 -83 52.94556 -43.62605 30 15.98202 -50.4658 134.80551 93.44619 953 947 89 105 139.75 115 144 120 146 120 118 100 151 120 68.25 57.5 67 60 65 60 64 50 77 60 49.25 
27.5 48 35 47 35 50 20 52 20 977 1008 27 180 29 20 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 36 CIRA_DIAG_RT GFS GFS_0p50 4 21 8 65 234 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 420000 2022-09-28 06:00:00 NA NA 64 37 HU NA NA 25.7 -83.9 25.2 -82.9 61.92912 -54.17764 30 23.69642 -57.20415 120.89156 88.74855 951 945 98 120 145.25 125 152 120 162 130 114 100 153 150 70.75 65 80 70 59 60 61 60 83 70 50 33.75 53 35 44 35 50 30 53 35 979 1010 39 200 30 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 CIRA_DIAG_RT GFS GFS_0p50 4 23 8 63 201 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 480000 2022-09-28 12:00:00 NA NA 64 38 HU NA NA 26.4 -83.8 26 -82.7 63.89784 -59.21938 23.99998 10.38589 -63.03647 85.78395 36.89529 948 937 94 135 149.75 127.5 154 120 112 140 149 100 184 150 72.75 70 81 70 62 60 66 70 82 80 48.5 37.5 50 40 42 40 47 30 55 40 968 1010 33 270 25 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 CIRA_DIAG_RT GFS GFS_0p50 4 26 6 63 145 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 540000 2022-09-28 18:00:00 NA NA 64 39 HU NA NA 26.9 -83.7 26.6 -82.4 71.94036 -69.65211 17.99995 -12.06164 -70.90886 52.14397 22.9944 949 938 102 135 158 132.5 139 130 115 150 152 100 226 150 73 65 80 50 58 60 64 70 90 80 51.5 36.25 50 30 44 40 48 30 64 45 965 1010 20 270 21 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 CIRA_DIAG_RT GFS GFS_0p50 4 30 5 62 118 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 600000 2022-09-29 00:00:00 NA NA 64 40 HU NA NA 27.3 -83.3 27.2 -81.7 85.55661 -85.34597 5.99991 -57.3644 -63.45568 41.32911 -36.81872 949 960 94 100 215 160 365 180 108 120 188 120 199 220 72.75 57.5 76 50 49 60 80 50 86 70 48 36.25 37 30 45 40 53 30 57 45 973 1010 31 250 27 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 CIRA_DIAG_RT GFS GFS_0p50 4 34 4 60 70 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 660000 2022-09-29 06:00:00 NA NA 64 41 TS NA NA 27.5 -83.1 27.7 -81.1 107.01934 -106.34443 -12.00005 -85.72076 -64.03877 20.90425 -34.57149 956 986 91 60 225.75 207.5 436 360 120 120 147 150 200 200 62.5 55 61 60 44 50 66 40 79 70 42.25 NA 33 NA 41 NA 41 NA 54 NA 976 1010 29 240 28 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 CIRA_DIAG_RT GFS GFS_0p50 4 37 3 59 45 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 720000 2022-09-29 12:00:00 NA NA 64 42 TS NA NA 27.7 -82.9 28.4 -80.6 128.82889 -121.79035 -41.99993 -100.45844 -80.61643 20.90425 5.2729 962 987 79 60 217 207.5 482 360 102 120 129 150 155 200 54.25 55 37 60 47 50 57 40 76 70 42 NA NA NA NA NA 37 NA 47 NA 978 1009 31 240 30 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 CIRA_DIAG_RT GFS GFS_0p50 4 34 2 59 15 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 780000 2022-09-29 18:00:00 NA NA 64 43 HU NA NA 27.8 -82.8 28.9 -80.1 157.10617 -142.5705 -66.00002 -143.61981 -63.61459 12.92327 43.03906 969 986 73 65 210.25 212.5 471 360 99 140 131 150 140 200 49.25 86.66667 41 120 35 NA 53 40 68 100 35 40 NA NA NA NA 33 NA 37 40 1009 1008 275 260 25 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 CIRA_DIAG_RT GFS GFS_0p50 4 30 1 59 0 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 840000 2022-09-30 00:00:00 NA NA 64 44 HU NA NA 27.8 -82.8 29.6 -79.4 209.00427 -178.9379 -108.00007 -198.98311 -63.81831 12.92327 81.60885 975 986 74 70 207.75 227.5 449 420 95 150 131 120 156 220 56 85 40 60 50 80 65 80 69 120 40.5 50 NA NA NA NA 41 40 40 60 1007 1008 167 260 25 40 NA 85 NA NA NA 0 NA 
0 NA D NA NA NA NA NA TCDIAG 44 CIRA_DIAG_RT GFS GFS_0p50 4 30 1 58 0 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 900000 2022-09-30 06:00:00 NA NA 64 45 HU NA NA 28 -82.7 30.3 -79.1 233.73096 -188.64298 -137.99995 -195.00241 -128.78102 -7.21147 122.45998 981 984 55 75 176.25 202.5 396 420 48 130 122 100 139 160 59.5 85 NA 80 NA 60 56 80 63 120 NA 50 NA NA NA NA NA 40 NA 60 1005 1007 125 180 44 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 45 CIRA_DIAG_RT GFS GFS_0p50 4 29 4 57 -5 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 960000 2022-09-30 12:00:00 NA NA 64 46 HU NA NA 28.6 -82.7 31.5 -79 259.23284 -192.16054 -173.99998 -187.22739 -179.22993 -6.44382 90.4944 985 980 51 75 108 157.5 NA 240 NA 130 99 120 117 140 30.5 75 NA 80 NA 60 30 80 31 80 NA 46.66667 NA 40 NA NA NA 40 NA 60 1009 1008 307 210 28 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 CIRA_DIAG_RT GFS GFS_0p50 4 22 8 57 9 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1020000 2022-09-30 18:00:00 NA NA 64 47 HU NA NA 29.6 -82.8 33.3 -79.2 288.51176 -184.26901 -221.99993 -203.78021 -204.16337 -11.77626 6.17339 988 977 37 75 82.66667 120 95 190 NA 130 78 100 75 60 NA 65 NA 80 NA 60 NA 80 NA 40 NA 35 NA NA NA 30 NA 40 NA NA 1008 1007 256 210 36 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 CIRA_DIAG_RT GFS GFS_0p50 4 25 9 57 -44 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1080000 2022-10-01 00:00:00 NA NA 64 48 EX NA NA 30.3 -83.2 34.4 -79.3 315.58541 -197.68177 -246.00014 -230.3756 -215.60466 -53.6993 -54.04712 993 990 34 50 NA 165 NA 160 NA 170 NA NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1005 1007 134 210 63 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 CIRA_DIAG_RT GFS GFS_0p50 4 36 8 53 -70 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1140000 2022-10-01 06:00:00 NA NA 64 49 EX NA NA 31 -83.7 35.3 -79.7 327.01693 -200.93804 -257.99995 -173.49691 -277.12884 -63.46339 -109.86324 997 1005 29 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1005 1010 133 400 140 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 CIRA_DIAG_RT GFS GFS_0p50 4 41 10 49 -112 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1200000 2022-10-01 12:00:00 NA NA 64 50 EX NA NA 32.1 -84.2 35.8 -79.9 308.36248 -214.01729 -222.00005 -144.84083 -272.16596 -115.03808 -146.50467 1000 1005 21 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1008 1010 264 405 161 400 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 50 CIRA_DIAG_RT GFS GFS_0p50 4 35 10 46 -222 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1260000 2022-10-01 18:00:00 NA NA 64 51 EX NA NA 32.7 -84.7 36.7 -79.2 362.22614 -271.30753 -240 -347.41422 -102.29308 -144.36449 -139.18918 1003 1008 18 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 1009 115 60 141 55 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 51 CIRA_DIAG_RT GFS GFS_0p50 4 38 7 44 -287 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 000000 2022-09-26 12:00:00 NA NA 53 29 HU NA BJR 18.7 -82.4 18.7 -82.4 0 0 0 0 0 189.05301 189.05301 NA 981 70 70 75 75 90 90 80 80 40 40 90 90 36.66667 36.66667 40 40 40 40 NA NA 30 30 20 20 20 20 NA NA NA NA NA NA NA 1008 NA 150 NA 15 85 85 NA NA 325 0 12 0 NA D NA NA NA NA NA TCDIAG 29 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 5.3 375 64.7 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 
120000 2022-09-27 00:00:00 NA NA 53 32 HU NA BJR 20.7 -83.5 20.8 -83.3 12.72477 -11.22145 -5.99991 -3.03036 -12.35631 87.28262 61.91212 NA 965 90 85 92.5 85 110 100 100 90 60 60 100 90 42.5 37.5 50 50 50 50 30 20 40 30 18.75 25 20 30 20 25 15 NA 20 20 NA 1006 NA 130 NA 20 110 105 NA NA 335 0 12 0 NA D NA NA NA NA NA TCDIAG 32 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 8.5 133 65.6 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 240000 2022-09-27 12:00:00 NA NA 53 35 HU NA BJR 22.7 -84 22.6 -83.6 22.94737 -22.14907 6.00002 5.99908 -22.14505 -4.86904 -16.52652 NA 963 105 100 110 115 130 120 120 120 80 100 110 120 50 57.5 60 60 50 60 40 50 50 60 22.5 23.75 25 30 25 25 20 20 20 20 NA 1009 NA 270 NA 15 130 135 NA NA 345 0 10 0 NA D NA NA NA NA NA TCDIAG 35 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 16.3 15 67.1 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 360000 2022-09-28 00:00:00 NA NA 53 37 HU NA BJR 24.5 -84 24.4 -83 54.94794 -54.61937 6.00002 -10.17517 -53.98754 105.63657 93.44619 NA 947 120 105 125 115 140 120 130 120 100 100 130 120 60 57.5 70 60 60 60 50 50 60 60 27.5 27.5 30 35 30 35 25 20 25 20 NA 1008 NA 180 NA 20 145 130 NA NA 360 0 9 0 NA D NA NA NA NA NA TCDIAG 37 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 25.5 202 63.9 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 480000 2022-09-28 12:00:00 NA NA 53 39 HU NA BJR 26.1 -83.8 26 -82.7 59.59826 -59.29547 6.00002 -7.18689 -59.15253 100.43401 36.89529 NA 937 120 135 140 127.5 160 120 140 140 110 100 150 150 70 70 80 70 70 60 60 70 70 80 36.25 37.5 40 40 40 40 30 30 35 40 NA 1010 NA 270 NA 20 145 165 NA NA 5 0 8 0 NA D NA NA NA NA NA TCDIAG 39 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.7 198 63.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 600000 2022-09-29 00:00:00 NA NA 53 41 HU NA BJR 27.2 -83.5 27.2 -81.7 96.05713 -96.05713 0 -69.24244 -66.55174 52.14397 -36.81872 NA 960 105 100 145 160 180 180 130 120 120 120 150 220 70 57.5 80 50 70 60 60 50 70 70 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 250 NA 20 130 120 NA NA 15 0 6 0 NA D NA NA NA NA NA TCDIAG 41 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.9 95 62.4 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 720000 2022-09-29 12:00:00 NA NA 53 43 TS NA BJR 28 -83.2 28.4 -80.6 139.56233 -137.48326 -23.99998 -93.60119 -103.48648 12.92327 5.2729 NA 987 90 60 145 207.5 180 360 130 120 120 150 150 200 70 55 90 60 50 50 60 40 80 70 NA NA NA NA NA NA NA NA NA NA NA 1009 NA 240 NA 40 110 75 NA NA 20 0 4 0 NA D NA NA NA NA NA TCDIAG 43 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.5 67 60.7 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 960000 2022-09-30 12:00:00 NA NA 53 47 HU NA BJR 29.8 -82.9 31.5 -79 225.67587 -201.30969 -102.00005 -116.07609 -193.48799 -31.75798 90.4944 NA 980 55 75 NA 157.5 NA 240 NA 130 NA 120 NA 140 NA 75 NA 80 NA 60 NA 80 NA 80 NA 46.66667 NA 40 NA NA NA 40 NA 60 NA 1008 NA 210 NA 40 65 90 NA NA 10 0 5 0 NA D NA NA NA NA NA TCDIAG 47 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 35.2 -47 59.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1200000 2022-10-01 12:00:00 NA NA 53 51 EX NA BJR 32.8 -82.6 35.8 -79.9 224.29862 -133.82777 -180 -129.72256 -182.93116 -96.75321 -146.50467 NA 1005 30 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 NA 405 NA 400 40 30 NA NA 5 0 8 0 NA M NA NA NA NA NA TCDIAG 51 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 41.9 -213 54.1 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 000000 2022-09-26 12:00:00 NA NA 55 29 HU NA NA 18.7 -82.3 18.7 
-82.4 5.68317 5.68317 0 -2.80125 4.94366 189.05301 189.05301 NA 981 70 70 NA 75 NA 90 NA 80 NA 40 NA 90 NA 36.66667 NA 40 NA 40 NA NA NA 30 NA 20 NA 20 NA NA NA NA NA NA NA 1008 NA 150 NA 15 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 29 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 5.3 375 64.7 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 120000 2022-09-27 00:00:00 NA NA 55 31 HU NA NA 20.7 -83.6 20.8 -83.3 17.86956 -16.83218 -5.99991 -1.63974 -17.79093 87.28262 61.91212 NA 965 91 85 NA 85 NA 100 NA 90 NA 60 NA 90 NA 37.5 NA 50 NA 50 NA 20 NA 30 NA 25 NA 30 NA 25 NA NA NA 20 NA 1006 NA 130 NA 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 8.5 133 65.6 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 240000 2022-09-27 12:00:00 NA NA 55 34 HU NA NA 22.7 -84.1 22.6 -83.6 28.32892 -27.68624 6.00002 5.99911 -27.68121 -4.86904 -16.52652 NA 963 109 100 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 23.75 NA 30 NA 25 NA 20 NA 20 NA 1009 NA 270 NA 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 16.3 15 67.1 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 360000 2022-09-28 00:00:00 NA NA 55 36 HU NA NA 24.5 -84.1 24.4 -83 60.38008 -60.08122 6.00002 -11.76652 -59.2114 105.63657 93.44619 NA 947 122 105 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 27.5 NA 35 NA 35 NA 20 NA 20 NA 1008 NA 180 NA 20 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 36 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 25.5 202 63.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 480000 2022-09-28 12:00:00 NA NA 55 38 HU NA NA 26.1 -84.1 26 -82.7 75.70476 -75.46662 6.00002 -10.74289 -74.92488 100.43401 36.89529 NA 937 124 135 NA 127.5 NA 120 NA 140 NA 100 NA 150 NA 70 NA 70 NA 60 NA 70 NA 80 NA 37.5 NA 40 NA 40 NA 30 NA 40 NA 1010 NA 270 NA 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 28.7 198 63.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 600000 2022-09-29 00:00:00 NA NA 55 40 HU NA NA 27.3 -83.6 27.2 -81.7 101.52548 -101.34804 5.99991 -68.89943 -74.54246 41.32911 -36.81872 NA 960 119 100 NA 160 NA 180 NA 120 NA 120 NA 220 NA 57.5 NA 50 NA 60 NA 50 NA 70 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 250 NA 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.9 95 62.4 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 720000 2022-09-29 12:00:00 NA NA 55 42 TS NA NA 28.1 -83.5 28.4 -80.6 154.32837 -153.27507 -17.99995 -96.94613 -120.04211 38.81667 5.2729 NA 987 106 60 NA 207.5 NA 360 NA 120 NA 150 NA 200 NA 55 NA 60 NA 50 NA 40 NA 70 NA NA NA NA NA NA NA NA NA NA NA 1009 NA 240 NA 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.5 67 60.7 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 840000 2022-09-30 00:00:00 NA NA 55 44 HU NA NA 28.8 -83.2 29.6 -79.4 204.73239 -199.02599 -48.00007 -166.98451 -118.39102 7.92187 81.60885 NA 986 94 70 NA 227.5 NA 420 NA 150 NA 120 NA 220 NA 85 NA 60 NA 80 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1008 NA 260 NA 40 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 44 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.9 51 59.7 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 960000 2022-09-30 12:00:00 NA NA 55 46 HU NA NA 29.7 -82.8 31.5 -79 224.004 -196.24934 -107.99995 -121.69883 -188.01359 -11.77626 90.4944 NA 980 77 75 NA 157.5 NA 240 NA 130 NA 120 NA 
140 NA 75 NA 80 NA 60 NA 80 NA 80 NA 46.66667 NA 40 NA NA NA 40 NA 60 NA 1008 NA 210 NA 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 35.2 -47 59.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1080000 2022-10-01 00:00:00 NA NA 55 48 EX NA NA 31.1 -82.8 34.4 -79.3 265.32624 -176.61819 -198.00007 -184.10614 -190.99108 -75.92581 -54.04712 NA 990 66 50 NA 165 NA 160 NA 170 NA NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 NA 210 NA 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 44.2 -131 56.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 12:00:00 1200000 2022-10-01 12:00:00 NA NA 55 50 EX NA NA 33 -83.1 35.8 -79.9 230.91427 -158.42164 -167.99995 -110.70377 -202.60029 -117.65192 -146.50467 NA 1005 54 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 NA 405 NA 400 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 50 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 41.9 -213 54.1 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 000000 2022-09-26 18:00:00 NA NA 71 30 HU NA NA 19.8 -83.2 19.7 -83 12.78874 -11.29394 5.99991 8.61078 -9.45238 127.23096 153.56557 982 976 61 80 80.5 85 95 100 94 90 65 60 68 90 46.25 37.5 55 50 56 50 37 20 37 30 NA 25 NA 30 NA 25 NA NA NA 20 1006 1008 100 150 38 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 CIRA_DIAG_RT GFS GFS_0p50 4 3 11 65 245 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 060000 2022-09-27 00:00:00 NA NA 71 31 HU NA NA 20.9 -83.4 20.8 -83.3 8.21211 -5.60701 6.00002 7.20139 -3.94392 61.91212 61.91212 975 965 68 85 84 85 96 100 91 90 69 60 80 90 45.75 37.5 52 50 52 50 39 20 40 30 39 25 39 30 NA 25 NA NA NA 20 1007 1006 202 130 27 20 NA 105 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 31 CIRA_DIAG_RT GFS GFS_0p50 4 3 11 65 130 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 120000 2022-09-27 06:00:00 NA NA 71 32 HU NA NA 21.9 -83.6 21.8 -83.6 6.00002 0 6.00002 5.7775 1.61487 12.00915 12.00915 969 956 77 100 95.25 90 104 100 107 100 73 70 97 90 41 42.5 53 50 45 50 43 30 23 40 32.33333 23.75 32 30 33 25 32 20 NA 20 989 1008 39 200 22 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 CIRA_DIAG_RT GFS GFS_0p50 4 9 10 65 33 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 180000 2022-09-27 12:00:00 NA NA 71 34 HU NA NA 22.9 -83.7 22.6 -83.6 18.83119 -5.53312 17.99995 17.99675 -5.53201 11.7181 -16.52652 968 963 75 100 116.5 115 141 120 116 120 88 100 121 120 55 57.5 57 60 58 60 47 50 58 60 35.33333 23.75 37 30 NA 25 34 20 35 20 1008 1009 180 270 23 15 NA 135 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 34 CIRA_DIAG_RT GFS GFS_0p50 4 12 10 65 18 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 240000 2022-09-27 18:00:00 NA NA 71 35 HU NA NA 23.8 -83.7 23.5 -83.3 28.41291 -21.98398 17.99995 10.76045 -26.29098 70.30835 41.01329 964 951 80 105 124.25 115 168 120 113 120 88 100 128 120 61 57.5 60 60 63 60 58 50 63 60 45 27.5 43 35 43 35 51 20 43 20 1008 1009 165 270 27 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 CIRA_DIAG_RT GFS GFS_0p50 4 13 9 66 113 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 300000 2022-09-28 00:00:00 NA NA 71 36 HU NA NA 24.7 -83.5 24.4 -83 32.69 -27.28797 18.00007 9.26519 -31.34338 99.60315 93.44619 955 947 83 105 133 115 138 120 128 120 124 100 142 120 67.25 57.5 69 60 65 60 60 50 75 60 48 27.5 47 35 44 35 48 20 53 20 979 1008 41 180 28 20 NA 130 NA NA NA 0 NA 0 NA D 
NA NA NA NA NA TCDIAG 36 CIRA_DIAG_RT GFS GFS_0p50 4 19 9 65 206 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 360000 2022-09-28 06:00:00 NA NA 71 37 HU NA NA 25.5 -83.6 25.2 -82.9 42.00747 -37.95562 17.99995 13.60358 -39.73581 98.52227 88.74855 954 945 99 120 135.25 125 126 120 160 130 107 100 148 150 70 65 79 70 62 60 59 60 80 70 48.5 33.75 48 35 47 35 45 30 54 35 974 1010 30 200 25 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 CIRA_DIAG_RT GFS GFS_0p50 4 24 8 64 191 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 420000 2022-09-28 12:00:00 NA NA 71 38 HU NA NA 26.3 -83.4 26 -82.7 41.77779 -37.70127 17.99995 9.26562 -40.72964 61.40549 36.89529 951 937 91 135 150.75 127.5 136 120 121 140 152 100 194 150 74 70 75 70 67 60 72 70 82 80 49.75 37.5 46 40 51 40 47 30 55 40 971 1010 30 270 22 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 38 CIRA_DIAG_RT GFS GFS_0p50 4 22 7 64 118 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 480000 2022-09-28 18:00:00 NA NA 71 39 HU NA NA 26.8 -83.4 26.6 -82.4 54.92907 -53.60228 11.99993 -10.97227 -53.81194 52.14397 22.9944 946 938 93 135 152.25 132.5 132 130 123 150 152 100 202 150 74.5 65 85 50 53 60 74 70 86 80 48.25 36.25 44 30 41 40 48 30 60 45 966 1010 29 270 26 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 CIRA_DIAG_RT GFS GFS_0p50 4 25 4 61 92 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 540000 2022-09-29 00:00:00 NA NA 71 40 HU NA NA 27.1 -83.1 27.2 -81.7 74.98497 -74.74454 -6.00002 -58.03636 -47.46053 26.0837 -36.81872 953 960 95 100 197.75 160 295 180 115 120 169 120 212 220 66.25 57.5 67 50 44 60 76 50 78 70 49.25 36.25 52 30 39 40 50 30 56 45 977 1010 40 250 24 20 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 40 CIRA_DIAG_RT GFS GFS_0p50 4 34 3 60 55 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 600000 2022-09-29 06:00:00 NA NA 71 41 TS NA NA 27.3 -82.9 27.7 -81.1 98.75795 -95.79733 -24.00009 -86.2468 -48.074 20.90425 -34.57149 959 986 85 60 212.75 207.5 427 360 111 120 143 150 170 200 57.5 55 42 60 50 50 62 40 76 70 38.75 NA 31 NA 30 NA 46 NA 48 NA 983 1010 34 240 27 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 CIRA_DIAG_RT GFS GFS_0p50 4 35 3 59 31 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 660000 2022-09-29 12:00:00 NA NA 71 42 TS NA NA 27.6 -82.8 28.4 -80.6 126.04658 -116.54932 -47.99995 -102.73886 -72.98456 20.90425 5.2729 964 987 79 60 207 207.5 442 360 99 120 133 150 154 200 53.25 55 42 60 41 50 62 40 68 70 40.66667 NA NA NA 35 NA 40 NA 47 NA 984 1009 34 240 22 40 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 42 CIRA_DIAG_RT GFS GFS_0p50 4 30 3 60 22 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 720000 2022-09-29 18:00:00 NA NA 71 43 HU NA NA 27.8 -82.9 28.9 -80.1 161.91313 -147.8508 -66.00002 -147.10201 -67.58266 12.92327 43.03906 969 986 72 65 197.25 212.5 414 360 105 140 126 150 144 200 61.25 86.66667 56 120 53 NA 63 40 73 100 39 40 NA NA NA NA 37 NA 41 40 1009 1008 214 260 28 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 CIRA_DIAG_RT GFS GFS_0p50 4 29 2 60 10 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 780000 2022-09-30 00:00:00 NA NA 71 44 HU NA NA 28 -82.8 29.6 -79.4 202.91258 -178.76664 -96.00002 -189.82995 -71.57707 12.92327 81.60885 969 986 75 70 203 227.5 385 420 92 150 144 120 191 220 56 85 43 60 33 80 66 80 82 120 41 50 NA NA NA NA 41 40 41 60 1005 1008 134 260 24 40 NA 85 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 44 CIRA_DIAG_RT GFS GFS_0p50 4 33 2 58 5 
+V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 840000 2022-09-30 06:00:00 NA NA 71 45 HU NA NA 28.2 -83 30.3 -79.1 239.91452 -204.16415 -125.99991 -189.15711 -147.50536 12.92327 122.45998 974 984 60 75 145.5 202.5 261 420 79 130 109 100 133 160 52.33333 85 34 80 NA 60 56 80 67 120 NA 50 NA NA NA NA NA 40 NA 60 1006 1007 164 180 39 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 45 CIRA_DIAG_RT GFS GFS_0p50 4 34 5 57 29 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 900000 2022-09-30 12:00:00 NA NA 71 46 HU NA NA 28.9 -83.1 31.5 -79 263.70375 -212.61152 -156.00002 -170.73474 -200.90862 7.92187 90.4944 978 980 55 75 82.75 157.5 46 240 68 130 102 120 115 140 41.5 75 NA 80 NA 60 41 80 42 80 NA 46.66667 NA 40 NA NA NA 40 NA 60 1010 1008 723 210 22 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 46 CIRA_DIAG_RT GFS GFS_0p50 4 32 7 58 29 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 960000 2022-09-30 18:00:00 NA NA 71 47 HU NA NA 29.6 -83.4 33.3 -79.2 309.0316 -214.98038 -221.99993 -200.91201 -234.73498 7.88058 6.17339 981 977 53 75 78 120 NA 190 61 130 112 100 61 60 22.5 65 NA 80 NA 60 23 80 22 40 NA 35 NA NA NA 30 NA 40 NA NA 1005 1007 136 210 22 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 CIRA_DIAG_RT GFS GFS_0p50 4 32 7 57 0 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1020000 2022-10-01 00:00:00 NA NA 71 48 EX NA NA 30.3 -83.4 34.4 -79.3 322.03265 -207.81954 -246.00014 -229.6124 -225.71185 -36.46659 -54.04712 989 990 34 50 NA 165 NA 160 NA 170 NA NA NA NA NA 90 NA NA NA 90 NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1005 1007 139 210 37 80 NA 60 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 48 CIRA_DIAG_RT GFS GFS_0p50 4 39 8 53 -57 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1080000 2022-10-01 06:00:00 NA NA 71 49 EX NA NA 31.1 -83.7 35.3 -79.7 322.23286 -200.82344 -251.99993 -167.90042 -274.96559 -63.46339 -109.86324 994 1005 30 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1006 1010 179 400 130 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 CIRA_DIAG_RT GFS GFS_0p50 4 39 9 50 -123 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1140000 2022-10-01 12:00:00 NA NA 71 50 EX NA NA 32.1 -84 35.8 -79.9 301.53903 -204.06314 -222.00005 -147.92089 -262.70222 -115.03808 -146.50467 998 1005 27 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 1010 693 405 157 400 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 50 CIRA_DIAG_RT GFS GFS_0p50 4 29 10 48 -224 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1200000 2022-10-01 18:00:00 NA NA 71 51 EX NA NA 32.9 -84.6 36.7 -79.2 350.38246 -266.05242 -227.99995 -334.45842 -104.2172 -174.30011 -139.18918 1001 1008 19 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1009 1009 353 60 153 55 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 51 CIRA_DIAG_RT GFS GFS_0p50 4 32 7 46 -311 +V11.1.0 GFSO BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1260000 2022-10-02 00:00:00 NA NA 71 52 EX NA NA 33.2 -85 37.3 -78.5 402.43256 -318.49021 -245.99991 -397.02394 -65.31169 -164.00543 -124.78046 1003 1009 17 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1007 1010 122 45 124 40 NA 20 NA NA NA 0 NA 0 NA S NA NA NA NA NA TCDIAG 52 CIRA_DIAG_RT GFS GFS_0p50 4 34 4 44 -324 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 000000 2022-09-26 18:00:00 NA NA 53 30 HU 
NA BJR 19.7 -83 19.7 -83 0 0 0 0 0 153.56557 153.56557 NA 976 80 80 85 85 100 100 90 90 60 60 90 90 37.5 37.5 50 50 50 50 20 20 30 30 25 25 30 30 25 25 NA NA 20 20 NA 1008 NA 150 NA 20 100 100 NA NA 330 0 11 0 NA D NA NA NA NA NA TCDIAG 30 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 4.8 265 66.2 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 120000 2022-09-27 06:00:00 NA NA 53 33 HU NA BJR 21.7 -83.8 21.8 -83.6 12.65826 -11.14597 -5.99991 -2.77751 -12.34744 21.17544 12.00915 NA 956 105 100 100 90 120 100 110 100 70 70 100 90 50 42.5 60 50 50 50 40 30 50 40 23.75 23.75 30 30 25 25 20 20 20 20 NA 1008 NA 200 NA 15 130 120 NA NA 340 0 10 0 NA D NA NA NA NA NA TCDIAG 33 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 13.9 28 66.3 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 240000 2022-09-27 18:00:00 NA NA 53 36 HU NA BJR 23.6 -84 23.5 -83.3 38.96644 -38.50173 6.00002 -5.55254 -38.56171 49.50692 41.01329 NA 951 115 105 115 115 130 120 120 120 90 100 120 120 55 57.5 60 60 60 60 50 50 50 60 30 27.5 35 35 30 35 25 20 30 20 NA 1009 NA 270 NA 15 140 130 NA NA 355 0 10 0 NA D NA NA NA NA NA TCDIAG 36 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 19.4 112 67.1 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 360000 2022-09-28 06:00:00 NA NA 53 38 HU NA BJR 25.3 -83.9 25.2 -82.9 54.59798 -54.26731 5.99991 -0.15648 -54.58792 120.89156 88.74855 NA 945 120 120 130 125 140 120 130 130 110 100 140 150 65 65 70 70 70 60 60 60 60 70 32.5 33.75 35 35 35 35 30 30 30 35 NA 1010 NA 200 NA 20 145 130 NA 25 5 0 9 0 NA D NA NA NA NA NA TCDIAG 38 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 26.4 226 64.1 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 480000 2022-09-28 18:00:00 NA NA 53 40 HU NA BJR 26.7 -83.5 26.6 -82.4 59.29265 -58.98829 6.00002 -18.64895 -56.27227 61.40549 22.9944 NA 938 115 135 137.5 132.5 150 130 140 150 110 100 150 150 70 65 80 50 70 60 60 70 70 80 37.5 36.25 40 30 40 40 35 30 35 45 NA 1010 NA 270 NA 20 140 165 NA NA 15 0 7 0 NA D NA NA NA NA NA TCDIAG 40 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 29.2 117 62.9 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 600000 2022-09-29 06:00:00 NA NA 53 42 TS NA BJR 27.5 -83.2 27.7 -81.1 112.30453 -111.66157 -12.00005 -89.59608 -67.67795 20.90425 -34.57149 NA 986 100 60 145 207.5 180 360 130 120 120 150 150 200 70 55 80 60 70 50 60 40 70 70 NA NA NA NA NA NA NA NA NA NA NA 1010 NA 240 NA 20 120 75 NA NA 20 0 4 0 NA D NA NA NA NA NA TCDIAG 42 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 37.8 57 58.6 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 720000 2022-09-29 18:00:00 NA NA 53 44 HU NA BJR 28.1 -82.9 28.9 -80.1 155.24815 -147.64144 -47.99995 -133.43712 -79.29591 12.92327 43.03906 NA 986 75 65 145 212.5 180 360 130 140 120 150 150 200 55 86.66667 60 120 50 NA 50 40 60 100 NA 40 NA NA NA NA NA NA NA 40 NA 1008 NA 260 NA 40 90 80 NA NA 25 0 3 0 NA D NA NA NA NA NA TCDIAG 44 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 33.3 36 59.8 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 960000 2022-09-30 18:00:00 NA NA 53 48 HU NA BJR 30.1 -82.3 33.3 -79.2 248.812 -158.25118 -191.99993 -176.34663 -175.46222 -51.10696 6.17339 NA 977 45 75 NA 120 NA 190 NA 130 NA 100 NA 60 NA 65 NA 80 NA 60 NA 80 NA 40 NA 35 NA NA NA 30 NA 40 NA NA NA 1007 NA 210 NA 40 55 90 NA NA 15 0 5 0 NA D NA NA NA NA NA TCDIAG 48 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 31.5 -97 57 +V11.1.0 OFCL BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1200000 2022-10-01 18:00:00 NA NA 53 52 EX NA BJR 33.5 -82 36.7 -79.2 236.12778 -137.4493 -192.00005 -235.64468 -14.41642 -96.38858 
-139.18918 NA 1008 25 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1009 NA 60 NA 55 35 30 NA NA 5 0 9 0 NA M NA NA NA NA NA TCDIAG 52 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 33.7 -194 44.4 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 000000 2022-09-26 18:00:00 NA NA 55 30 HU NA NA 19.7 -83 19.7 -83 0 0 0 0 0 153.56557 153.56557 NA 976 80 80 NA 85 NA 100 NA 90 NA 60 NA 90 NA 37.5 NA 50 NA 50 NA 20 NA 30 NA 25 NA 30 NA 25 NA NA NA 20 NA 1008 NA 150 NA 20 NA 100 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 30 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 4.8 265 66.2 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 120000 2022-09-27 06:00:00 NA NA 55 32 HU NA NA 21.8 -83.8 21.8 -83.6 11.14208 -11.14208 0 2.99883 -10.72886 5.05424 12.00915 NA 956 100 100 NA 90 NA 100 NA 100 NA 70 NA 90 NA 42.5 NA 50 NA 50 NA 30 NA 40 NA 23.75 NA 30 NA 25 NA 20 NA 20 NA 1008 NA 200 NA 15 NA 120 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 32 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 13.9 28 66.3 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 240000 2022-09-27 18:00:00 NA NA 55 35 HU NA NA 23.7 -84 23.5 -83.3 40.31446 -38.48707 12.00005 0.187 -40.30677 49.50692 41.01329 NA 951 118 105 NA 115 NA 120 NA 120 NA 100 NA 120 NA 57.5 NA 60 NA 60 NA 50 NA 60 NA 27.5 NA 35 NA 35 NA 20 NA 20 NA 1009 NA 270 NA 15 NA 130 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 35 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 19.4 112 67.1 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 360000 2022-09-28 06:00:00 NA NA 55 37 HU NA NA 25.4 -83.8 25.2 -82.9 50.27369 -48.82054 11.99993 6.41818 -49.85319 120.89156 88.74855 NA 945 128 120 NA 125 NA 120 NA 130 NA 100 NA 150 NA 65 NA 70 NA 60 NA 60 NA 70 NA 33.75 NA 35 NA 35 NA 30 NA 35 NA 1010 NA 200 NA 20 NA 130 NA 25 NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 37 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 26.4 226 64.1 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 480000 2022-09-28 18:00:00 NA NA 55 39 HU NA NA 26.8 -83.6 26.6 -82.4 65.43235 -64.32258 11.99993 -15.35632 -63.59271 52.14397 22.9944 NA 938 128 135 NA 132.5 NA 130 NA 150 NA 100 NA 150 NA 65 NA 50 NA 60 NA 70 NA 80 NA 36.25 NA 30 NA 40 NA 30 NA 45 NA 1010 NA 270 NA 20 NA 165 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 39 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 29.2 117 62.9 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 600000 2022-09-29 06:00:00 NA NA 55 41 TS NA NA 27.7 -83.3 27.7 -81.1 116.8722 -116.8722 0 -85.18065 -79.9903 41.32911 -34.57149 NA 986 115 60 NA 207.5 NA 360 NA 120 NA 150 NA 200 NA 55 NA 60 NA 50 NA 40 NA 70 NA NA NA NA NA NA NA NA NA NA NA 1010 NA 240 NA 20 NA 75 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 41 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 37.8 57 58.6 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 720000 2022-09-29 18:00:00 NA NA 55 43 HU NA NA 28.5 -83.1 28.9 -80.1 159.69999 -157.88631 -23.99998 -122.15769 -102.82218 19.30555 43.03906 NA 986 100 65 NA 212.5 NA 360 NA 140 NA 150 NA 200 NA 86.66667 NA 120 NA NA NA 40 NA 100 NA 40 NA NA NA NA NA NA NA 40 NA 1008 NA 260 NA 40 NA 80 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 43 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 33.3 36 59.8 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 840000 2022-09-30 06:00:00 NA NA 55 45 HU NA NA 29.3 -82.8 30.3 -79.1 201.77159 -192.64417 -60 -123.28695 -159.67919 -11.77626 122.45998 NA 984 86 75 NA 202.5 NA 420 NA 130 NA 100 NA 160 NA 85 NA 80 NA 60 NA 80 NA 120 NA 50 NA NA NA NA NA 40 NA 60 NA 1007 NA 180 NA 40 NA 90 NA NA NA 0 NA 0 NA 
D NA NA NA NA NA TCDIAG 45 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 38.5 -15 58 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 960000 2022-09-30 18:00:00 NA NA 55 47 HU NA NA 30.5 -82.8 33.3 -79.2 248.69971 -183.3782 -167.99995 -150.10921 -198.23345 -53.6993 6.17339 NA 977 73 75 NA 120 NA 190 NA 130 NA 100 NA 60 NA 65 NA 80 NA 60 NA 80 NA 40 NA 35 NA NA NA 30 NA 40 NA NA NA 1007 NA 210 NA 40 NA 90 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 47 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 31.5 -97 57 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1080000 2022-10-01 06:00:00 NA NA 55 49 EX NA NA 31.9 -82.6 35.3 -79.7 250.24033 -144.92837 -203.99998 -141.96379 -206.01933 -70.34722 -109.86324 NA 1005 60 15 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1010 NA 400 NA 200 NA 40 NA NA NA 0 NA 0 NA D NA NA NA NA NA TCDIAG 49 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 36.9 -140 53.8 +V11.1.0 SHIP BEST NA AL092022 AL 09 IAN 2022-09-26 18:00:00 1200000 2022-10-01 18:00:00 NA NA 55 51 EX NA NA 33.8 -82.2 36.7 -79.2 227.77994 -146.99548 -174.00009 -225.47033 -32.06469 -117.32048 -139.18918 NA 1008 49 20 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA 1009 NA 60 NA 55 NA 30 NA NA NA 0 NA 0 NA M NA NA NA NA NA TCDIAG 51 SHIPS_DIAG_RT SHIPS_TRK GFS_0p50 3 33.7 -194 44.4 diff --git a/test/tcmpr_plots/tcmpr_point_tcdiag.yaml b/test/tcmpr_plots/tcmpr_point_tcdiag.yaml new file mode 100755 index 00000000..636e8d63 --- /dev/null +++ b/test/tcmpr_plots/tcmpr_point_tcdiag.yaml @@ -0,0 +1,162 @@ +colors: + # - 'green' + - 'blue' + - 'orange' + - 'red' + - 'purple' + - 'green' + - 'blue' + - 'orange' + - 'red' + - 'purple' + +plot_disp: + - 'True' + - 'True' + - 'True' + - 'True' + +series_order: + - 1 + - 2 + - 3 + - 4 + +mar: + l: 0 #left margin + r: 0 #right margin + b: 110 #bottom margin + t: 100 #top margin +title_offset: -2.03 + +series_val_1: + AMODEL: + - 'OFCL' + - 'GFSO' + - 'SHIP' + +fixed_vars_vals_input: + BASIN: + - AL + LEVEL: + - HU + - TS + - EX + INIT: + - '2022-09-26 00:00:00' + +series_ci: + - 'True' + - 'True' + - 'True' + - 'True' +series_line_width: + - 1 + - 1 + - 1 + - 1 + +series_line_style: + - '-' + - '-' + - '-' + - '-' +series_symbols: + - 'circle-open' + - 'circle-open' + - 'circle-open' + - 'circle-open' +series_symbols_size: + - 7 + - 7 + - 7 + - 7 + + + +tcst_dir: './Data/TCDiag/' +tcst_files: [ ] +title: '' +title_size: 1. 
+ +yaxis_1: '' + +plot_type_list: + - 'point' + + + +event_equal: 'False' + + +plot_dir: './output/' +prefix: '' + + +indy_vals: + - 0 + - 6 + - 12 + - 18 + - 24 + - 30 + - 36 + - 42 + - 48 + - 54 + - 60 + - 66 + - 72 + - 78 + - 84 + - 90 + - 96 + - 102 + - 108 + - 114 + - 120 + - 126 + +indy_label: + - '00' + - '06' + - '12' + - '18' + - '24' + - '30' + - '36' + - '42' + - '48' + - '54' + - '60' + - '66' + - '72' + - '78' + - '84' + - '90' + - '96' + - '102' + - '108' + - '114' + - '120' + - '126' + +list_stat_1: + - "SHEAR_MAGNITUDE" + # - "TK_ERR" + - "AMAX_WIND-BMAX_WIND" + + +rp_diff: + - '>=100' + +subtitle: '' +is_tcdiag_linetype: true +hfip_bsln: 'no' +line_type: '-' + +# for line plot set to True +connect_points: True + +log_level: INFO +log_filename: './output/tcmpr.log' \ No newline at end of file diff --git a/test/tcmpr_plots/test_tcmpr_multi_plots.yaml b/test/tcmpr_plots/test_tcmpr_multi_plots.yaml index 81abf145..f94694ec 100755 --- a/test/tcmpr_plots/test_tcmpr_multi_plots.yaml +++ b/test/tcmpr_plots/test_tcmpr_multi_plots.yaml @@ -102,7 +102,7 @@ series_symbols_size: - 7 show_nstats: 'True' -tcst_dir: './Data/' +tcst_dir: !ENV '${TEST_DIR}/Data/' #tcst_dir: '/path/to/tcmpr_sample_data' tcst_files: [ ] title: '' @@ -133,11 +133,11 @@ event_equal: 'True' skill_ref: - HFSA -plot_dir: './output' +plot_dir: !ENV '${TEST_DIR}/output' prefix: '' subtitle: log_level: INFO -log_filename: './output/tcmpr_log.out' +log_filename: !ENV '${TEST_DIR}/output/tcmpr_log.out' baseline_file: './metplotpy/plots/tcmpr_plots/hfip_baseline.dat' column_info_file: './metplotpy/plots/tcmpr_plots/plot_tcmpr_hdr.dat' hfip_bsln: 'no' diff --git a/test/tcmpr_plots/test_tcmpr_plots.py b/test/tcmpr_plots/test_tcmpr_plots.py index c35ed402..56f7764a 100644 --- a/test/tcmpr_plots/test_tcmpr_plots.py +++ b/test/tcmpr_plots/test_tcmpr_plots.py @@ -1,32 +1,26 @@ import pytest import os -import re + import metplotpy.plots.tcmpr_plots.tcmpr as t +cwd = os.path.dirname(__file__) -@pytest.fixture -def setup(): - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../.." - custom_config_filename = "./test_tcmpr_multi_plots.yaml" +@pytest.fixture +def setup(setup_env): + setup_env(cwd) + custom_config_filename = f"{cwd}/test_tcmpr_multi_plots.yaml" # Invoke the command to generate a line plot based on # the custom config file t.main(custom_config_filename) - def test_plots_created(setup): - - - # Check for presence of fourteen plots (seven plot types, one each for the # ABS(X-Y) and TK_ERR columns. 
expected_num_plots = 14 - test_dir = os.getcwd() - output_dir = os.path.join(test_dir, 'output') + output_dir = os.path.join(cwd, 'output') only_files = os.listdir(output_dir) assert len(only_files) == expected_num_plots @@ -81,7 +75,6 @@ def test_plots_created(setup): # assert cur_file_size >= expected_size # - # Clean up try: for cur_file in only_files: @@ -89,4 +82,4 @@ def test_plots_created(setup): os.rmdir(output_dir) except FileNotFoundError: # If files already cleaned up, then ignore error - pass \ No newline at end of file + pass diff --git a/test/util/test_util.py b/test/util/test_util.py index 46427056..b22bcc04 100644 --- a/test/util/test_util.py +++ b/test/util/test_util.py @@ -1,8 +1,11 @@ +import os import pandas as pd import metplotpy.plots.util as util import gc +cwd = os.path.dirname(__file__) + def test_is_threshold(): """ @@ -14,20 +17,14 @@ def test_is_threshold(): not_thresholds = [1.0, 11, 77] mixed_thresholds = ['==0.0', '> 11', 21, '>= .5', '== 3'] - if not util.is_threshold_value(not_thresholds): - assert True - - if util.is_threshold_value(mixed_thresholds): - assert True + assert not any(util.is_threshold_value(not_thresholds)) + assert any(util.is_threshold_value(mixed_thresholds)) # test series series_not_thresholds = pd.Series(not_thresholds, range(len(not_thresholds))) series_mixed_thresholds = pd.Series(mixed_thresholds, range(len(mixed_thresholds))) - if not util.is_threshold_value(series_not_thresholds): - assert True - - if util.is_threshold_value(series_mixed_thresholds): - assert True + assert not any(util.is_threshold_value(series_not_thresholds)) + assert any(util.is_threshold_value(series_mixed_thresholds)) def test_sort_threshold_values(): @@ -41,8 +38,8 @@ def test_sort_threshold_values(): sorted_list = util.sort_threshold_values(threshold_list) - for idx, sorted in enumerate(sorted_list): - if sorted != expected_list[idx]: + for idx, sorted_val in enumerate(sorted_list): + if sorted_val != expected_list[idx]: assert False @@ -57,8 +54,8 @@ def test_sort_threshold_values_whitespace(): sorted_list = util.sort_threshold_values(threshold_list) - for idx, sorted in enumerate(sorted_list): - if sorted != expected_list[idx]: + for idx, sorted_val in enumerate(sorted_list): + if sorted_val != expected_list[idx]: assert False @@ -73,10 +70,11 @@ def test_sort_threshold_values_floats(): sorted_list = util.sort_threshold_values(threshold_list) - for idx, sorted in enumerate(sorted_list): - if sorted != expected_list[idx]: + for idx, sorted_val in enumerate(sorted_list): + if sorted_val != expected_list[idx]: assert False + def test_is_thresh(): """ Verify that the regular expression used to test for column labels that are threshold-types (i.e. 
fcst_thresh, obs_thresh, @@ -90,21 +88,17 @@ def test_is_thresh(): counter = 0 expected_none = len(no_thresh_cols) for cur_col in no_thresh_cols: - if not util.is_thresh_column(cur_col): - counter += 1 + if not util.is_thresh_column(cur_col): + counter += 1 - if counter == expected_none: - assert True + assert counter == expected_none counter = 0 expected_all = len(thresh_cols) for cur_col in thresh_cols: if util.is_thresh_column(cur_col): counter +=1 - if counter == expected_all: - assert True - else: - assert False + assert counter == expected_all expected_found = 2 counter = 0 @@ -112,10 +106,7 @@ def test_is_thresh(): if util.is_thresh_column(cur_col): counter +=1 - if counter == expected_found: - assert True - else: - assert False + assert counter == expected_found def test_filter_by_fixed_vars(): @@ -125,7 +116,6 @@ def test_filter_by_fixed_vars(): """ - settings_dict = {'fcst_thresh': ['NA', '>0.0', '>=0.254'], 'obtype': ['CCPA'], 'vx_mask': ['CONUS'], 'fcst_init_beg': ['2023-06-24 12:00:00']} @@ -135,7 +125,7 @@ def test_filter_by_fixed_vars(): "vx_mask in ('CONUS')", "fcst_init_beg in ('2023-06-24 12:00:00')"] - input_df = pd.read_csv("./full_thresh_data.txt", sep='\t') + input_df = pd.read_csv(f"{cwd}/full_thresh_data.txt", sep='\t') filtered_df = util.filter_by_fixed_vars(input_df, settings_dict) expected_df = input_df.copy(deep=True) @@ -146,20 +136,17 @@ def test_filter_by_fixed_vars(): # Verify that the expected and filtered dataframes have the same dimensions assert filtered_df.shape == expected_df.shape - filtered_list = filtered_df['fcst_thresh'].to_list() expected_list = expected_df['fcst_thresh'].to_list() for filtered in filtered_list: assert filtered in expected_list - # Clean up previous dataframes del intermed_df del expected_df gc.collect() - # Now test when there is only one value in fcst_thresh and it is NA settings_dict2 = {'fcst_thresh': ['NA'], 'obtype': ['CCPA'], 'vx_mask': ['CONUS'], 'fcst_init_beg': ['2023-06-24 12:00:00']} @@ -178,7 +165,6 @@ def test_filter_by_fixed_vars(): assert filtered_df.shape == expected_df.shape - filtered_list = filtered_df['fcst_thresh'].to_list() expected_list = expected_df['fcst_thresh'].to_list() diff --git a/test/wind_rose/minimal_wind_rose.yaml b/test/wind_rose/minimal_wind_rose.yaml index 256d73bd..76db4667 100644 --- a/test/wind_rose/minimal_wind_rose.yaml +++ b/test/wind_rose/minimal_wind_rose.yaml @@ -1,8 +1,8 @@ # minimal config file to use all the settings in the wind_rose_default.yaml file. # **NOTE**: update the stat_input and plot_filename settings with the full path to your # input file and output file, respectively. 
-stat_input: ./point_stat_mpr.txt -plot_filename: ./wind_rose_default.png +stat_input: !ENV '${TEST_DIR}/point_stat_mpr.txt' +plot_filename: !ENV '${TEST_DIR}/wind_rose_default.png' # Optional, uncomment and set to directory to store the .points1 file # that is used by METviewer (created when dump_points_1 is set to True) diff --git a/test/wind_rose/test_wind_rose.py b/test/wind_rose/test_wind_rose.py index 774a5917..04ac6284 100644 --- a/test/wind_rose/test_wind_rose.py +++ b/test/wind_rose/test_wind_rose.py @@ -3,15 +3,15 @@ from metplotpy.plots.wind_rose import wind_rose #from metcalcpy.compare_images import CompareImages +cwd = os.path.dirname(__file__) +CLEANUP_FILES = ['wind_rose_custom.png', 'point_stat_mpr.points1'] @pytest.fixture -def setup(): +def setup(remove_files, setup_env): # Cleanup the plotfile and point1 output file from any previous run - cleanup() - # Set up the METPLOTPY_BASE so that met_plot.py will correctly find - # the config directory containing all the default config files. - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "./wind_rose_custom.yaml" + remove_files(cwd, CLEANUP_FILES) + setup_env(cwd) + custom_config_filename = f"{cwd}/wind_rose_custom.yaml" # Invoke the command to generate a Wind rose Diagram based on # a custom config file. @@ -22,108 +22,81 @@ def cleanup(): # remove the .png and .points files # from any previous runs try: - path = os.getcwd() plot_file = 'wind_rose_custom.png' points_file_1 = 'point_stat_mpr.points1' - os.remove(os.path.join(path, plot_file)) - os.remove(os.path.join(path, points_file_1)) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. + os.remove(os.path.join(cwd, plot_file)) + os.remove(os.path.join(cwd, points_file_1)) + except OSError: pass @pytest.mark.parametrize("test_input,expected", (["wind_rose_expected.png", True], ["wind_rose_expected.points", True])) -def test_files_exist(setup, test_input, expected): +def test_files_exist(setup, test_input, expected, remove_files): ''' Checking that the plot and data files are getting created ''' - assert os.path.isfile(test_input) == expected - cleanup() - + assert os.path.isfile(f"{cwd}/{test_input}") == expected + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input,expected", - (["./intermed_files/wind_rose_custom_points.png", True], ["./intermed_files/point_stat_mpr.points1", True])) -def test_points1_files_exist( test_input, expected): + (["intermed_files/wind_rose_custom_points.png", True], ["intermed_files/point_stat_mpr.points1", True])) +def test_points1_files_exist(setup_env, test_input, expected, remove_files): ''' Checking that the plot file and points1 file are getting created where expected plot and point file are being saved in the intermed_files subdir ''' - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "wind_rose_custom_points.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/wind_rose_custom_points.yaml" try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Wind Rose Diagram based on # a custom config file. 
wind_rose.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected # remove the plot and points1 files that were created - cleanup() - try: - path = os.getcwd() - subdir = os.path.join(path, 'intermed_files') - plot_file = 'wind_rose_custom_points.png' - points_file_1 = 'point_stat_mpr.points1' - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass + remove_files(cwd, CLEANUP_FILES) @pytest.mark.parametrize("test_input,expected", - (["./intermed_files/wind_rose_points2.png", True], - ["./intermed_files/point_stat_mpr.points1", True])) -def test_points1_files_exist(test_input, expected): + (["intermed_files/wind_rose_points2.png", True], + ["intermed_files/point_stat_mpr.points1", True])) +def test_points1_files_exist(setup_env, test_input, expected, remove_files): ''' Checking that the plot file and points1 file are getting created where expected plot and point file are being saved in the intermed_files subdir. Verify that when no stat_file is specified, the point_stat_mpr.txt in the test dir is being used. ''' - os.environ['METPLOTPY_BASE'] = "../../" - custom_config_filename = "wind_rose_custom2_points.yaml" + setup_env(cwd) + custom_config_filename = f"{cwd}/wind_rose_custom2_points.yaml" try: - os.mkdir(os.path.join(os.getcwd(), './intermed_files')) - except FileExistsError as e: + os.mkdir(os.path.join(cwd, 'intermed_files')) + except FileExistsError: pass # Invoke the command to generate a Wind Rose Diagram based on # a custom config file. wind_rose.main(custom_config_filename) - assert os.path.isfile(test_input) == expected + assert os.path.isfile(f"{cwd}/{test_input}") == expected + + # remove the plot and points1 files and intermed_files that were created + remove_files(cwd, CLEANUP_FILES) + - # remove the plot and points1 files that were created - cleanup() - try: - path = os.getcwd() - subdir = os.path.join(path, 'intermed_files') - plot_file = 'wind_rose_points2.png' - points_file_1 = 'point_stat_mpr.points1' - os.remove(os.path.join(subdir, plot_file)) - os.remove(os.path.join(subdir, points_file_1)) - os.rmdir(subdir) - except OSError as e: - # Typically when files have already been removed or - # don't exist. Ignore. - pass - @pytest.mark.skip("unreliable sometimes fails due to differences in machines.") -def test_images_match(setup): +def test_images_match(setup, remove_files): ''' Compare an expected plot with the newly created plot to verify that the plot hasn't changed in appearance. ''' - comparison = CompareImages('./wind_rose_expected.png', './wind_rose_custom.png') + comparison = CompareImages(f'{cwd}/wind_rose_expected.png', f'{cwd}/wind_rose_custom.png') assert comparison.mssim == 1 - cleanup() + remove_files(cwd, CLEANUP_FILES) diff --git a/test/wind_rose/wind_rose_custom.yaml b/test/wind_rose/wind_rose_custom.yaml index 0851ec38..605a8792 100644 --- a/test/wind_rose/wind_rose_custom.yaml +++ b/test/wind_rose/wind_rose_custom.yaml @@ -33,9 +33,9 @@ angularaxis_ticktext: # !!!!!!!! IMPORTANT !!!!!! 
# Uncomment the stat_input setting and specify the full path to the # point_stat_mpr.txt file -stat_input: ./point_stat_mpr.txt +stat_input: !ENV '${TEST_DIR}/point_stat_mpr.txt' -plot_filename: ./wind_rose_custom.png +plot_filename: !ENV '${TEST_DIR}/wind_rose_custom.png' dump_points: True # Optional, uncomment and set to directory to store the .points1 file diff --git a/test/wind_rose/wind_rose_custom2_points.yaml b/test/wind_rose/wind_rose_custom2_points.yaml index b9177f49..544405ff 100644 --- a/test/wind_rose/wind_rose_custom2_points.yaml +++ b/test/wind_rose/wind_rose_custom2_points.yaml @@ -29,8 +29,8 @@ angularaxis_ticktext: - 'E' - 'S' - 'W' -plot_filename: ./intermed_files/wind_rose_points2.png +plot_filename: !ENV '${TEST_DIR}/intermed_files/wind_rose_points2.png' dump_points: True -points_path: ./intermed_files +points_path: !ENV '${TEST_DIR}/intermed_files' # No stat_input, force creation of 'default' points file name show_in_browser: False \ No newline at end of file diff --git a/test/wind_rose/wind_rose_custom_points.yaml b/test/wind_rose/wind_rose_custom_points.yaml index 61842b59..56c1347c 100644 --- a/test/wind_rose/wind_rose_custom_points.yaml +++ b/test/wind_rose/wind_rose_custom_points.yaml @@ -29,8 +29,8 @@ angularaxis_ticktext: - 'E' - 'S' - 'W' -plot_filename: ./intermed_files/wind_rose_custom_points.png +plot_filename: !ENV '${TEST_DIR}/intermed_files/wind_rose_custom_points.png' dump_points: True -points_path: ./intermed_files -stat_input: ./point_stat_mpr.txt +points_path: !ENV '${TEST_DIR}/intermed_files' +stat_input: !ENV '${TEST_DIR}/point_stat_mpr.txt' show_in_browser: False \ No newline at end of file
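Note on the !ENV '${TEST_DIR}/...' entries introduced in the YAML configs above (test_tcmpr_multi_plots.yaml, minimal_wind_rose.yaml, and the wind_rose custom configs): the code that resolves this tag is not part of this diff. The snippet below is only a minimal sketch, assuming a PyYAML-based loader and os.path.expandvars; the project's actual '!ENV' handling may differ.

# Minimal sketch (assumption): register an '!ENV' constructor that expands
# environment variables such as ${TEST_DIR} in scalar values.
import os
import yaml

def _env_constructor(loader, node):
    # Expand ${VAR} / $VAR references using the current environment.
    return os.path.expandvars(loader.construct_scalar(node))

yaml.SafeLoader.add_constructor('!ENV', _env_constructor)

with open('test/wind_rose/minimal_wind_rose.yaml') as f:
    config = yaml.load(f, Loader=yaml.SafeLoader)

# With TEST_DIR=/path/to/test/wind_rose exported, config['stat_input'] resolves
# to '/path/to/test/wind_rose/point_stat_mpr.txt'.

If TEST_DIR is unset, os.path.expandvars leaves the literal '${TEST_DIR}' text in place, so the variable must be exported before the tests are run.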
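The rewritten tests above also request setup_env and remove_files pytest fixtures alongside cwd = os.path.dirname(__file__); the fixture definitions themselves (presumably in a shared conftest.py) do not appear in this diff. The sketch below is a hypothetical illustration consistent with how the tests call them, not the repository's actual implementation.

# Hypothetical conftest.py sketch (assumption): behavior inferred from the
# calls setup_env(cwd) and remove_files(cwd, CLEANUP_FILES) in the tests.
import os
import pytest

@pytest.fixture
def setup_env():
    def _setup(test_dir):
        # Point METPLOTPY_BASE at the repository root and expose the test
        # directory so !ENV '${TEST_DIR}/...' config entries can resolve.
        os.environ['METPLOTPY_BASE'] = os.path.abspath(os.path.join(test_dir, '..', '..'))
        os.environ['TEST_DIR'] = test_dir
    return _setup

@pytest.fixture
def remove_files():
    def _remove(test_dir, filenames):
        # Remove leftover plot/points output from earlier runs, ignoring
        # files that are already gone.
        for name in filenames:
            try:
                os.remove(os.path.join(test_dir, name))
            except OSError:
                pass
    return _remove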