From b95d21c2090fb02b2a3c1aa1ab8153c42196b7fc Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Tue, 10 Dec 2024 14:13:32 +0100 Subject: [PATCH 01/12] init devcontainer setup --- .devcontainer/devcontainer.env.template | 0 .devcontainer/devcontainer.json | 0 .gitignore | 3 +++ 3 files changed, 3 insertions(+) create mode 100644 .devcontainer/devcontainer.env.template create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.env.template b/.devcontainer/devcontainer.env.template new file mode 100644 index 0000000..e69de29 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..e69de29 diff --git a/.gitignore b/.gitignore index 85f0197..d0228b2 100644 --- a/.gitignore +++ b/.gitignore @@ -59,3 +59,6 @@ MANIFEST # Version file /src/pytanis/_version.py + +# Devcontainer +devcontainer.env \ No newline at end of file From 7abf79914a97800b12689500318cbe76f5516ea4 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Tue, 10 Dec 2024 15:44:12 +0000 Subject: [PATCH 02/12] init devcontainer --- .devcontainer/devcontainer.env.template | 0 .devcontainer/devcontainer.json | 42 +++++++++++++++++++++++++ 2 files changed, 42 insertions(+) delete mode 100644 .devcontainer/devcontainer.env.template diff --git a/.devcontainer/devcontainer.env.template b/.devcontainer/devcontainer.env.template deleted file mode 100644 index e69de29..0000000 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e69de29..1050138 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "pytanis", + // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:3.12", + // "runArgs": ["--env-file", ".devcontainer/devcontainer.env"], + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-toolsai.jupyter", + "ms-toolsai.jupyter-keymap", + "ms-toolsai.jupyter-renderers", + "eamodio.gitlens" + // "GitHub.copilot" + ] + }, + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "jupyter.jupyterServerType": "local" + } + }, + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "apt update && apt install -y pipx && pipx install hatch && pipx install pre-commit && hatch config set dirs.env.virtual .direnv", + + // Configure tool-specific properties. + // "customizations": {}, + + "mounts": [ + "type=bind,source=${localEnv:HOME}/.ssh,target=/root/.ssh" + ], + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
+ "remoteUser": "root" +} \ No newline at end of file From 61d4ead1ba81932c0912e4376c703c71c47f3e69 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Sun, 15 Dec 2024 08:48:07 +0000 Subject: [PATCH 03/12] include hatch-pip-compiler for cli usage and upgrade highspy dependencies --- .devcontainer/devcontainer.json | 22 ++-------------------- requirements.txt | 24 ++++++------------------ requirements/requirements-docs.txt | 13 ++++--------- requirements/requirements-lint.txt | 14 ++++---------- 4 files changed, 16 insertions(+), 57 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1050138..4cc4af1 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,10 +1,6 @@ -// For format details, see https://aka.ms/devcontainer.json. For config options, see the -// README at: https://github.com/devcontainers/templates/tree/main/src/python { "name": "pytanis", - // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile "image": "mcr.microsoft.com/devcontainers/python:3.12", - // "runArgs": ["--env-file", ".devcontainer/devcontainer.env"], "customizations": { "vscode": { "extensions": [ @@ -13,7 +9,6 @@ "ms-toolsai.jupyter-keymap", "ms-toolsai.jupyter-renderers", "eamodio.gitlens" - // "GitHub.copilot" ] }, "settings": { @@ -21,22 +16,9 @@ "jupyter.jupyterServerType": "local" } }, - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - - // Use 'postCreateCommand' to run commands after the container is created. - "postCreateCommand": "apt update && apt install -y pipx && pipx install hatch && pipx install pre-commit && hatch config set dirs.env.virtual .direnv", - - // Configure tool-specific properties. 
- // "customizations": {}, - + "postCreateCommand": "apt update && apt install -y pipx && pipx install hatch && pipx install pre-commit && pipx install hatch-pip-compile && hatch config set dirs.env.virtual .direnv", "mounts": [ "type=bind,source=${localEnv:HOME}/.ssh,target=/root/.ssh" ], - - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. "remoteUser": "root" -} \ No newline at end of file +} diff --git a/requirements.txt b/requirements.txt index ef85343..14a5bc9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,8 +34,6 @@ anyio==4.2.0 # via # httpx # jupyter-server -appnope==0.1.3 - # via ipykernel argon2-cffi==23.1.0 # via jupyter-server argon2-cffi-bindings==21.2.0 @@ -79,7 +77,6 @@ contourpy==1.2.0 coverage==7.4.0 # via # hatch.envs.default - # coverage # pytest-cov cycler==0.12.1 # via matplotlib @@ -93,12 +90,6 @@ dill==0.3.7 # via pyomo et-xmlfile==1.1.0 # via openpyxl -exceptiongroup==1.2.0 - # via - # anyio - # hypothesis - # ipython - # pytest executing==2.0.1 # via stack-data fastjsonschema==2.19.0 @@ -124,7 +115,7 @@ gspread-formatting==1.1.2 # via hatch.envs.default h11==0.14.0 # via httpcore -highspy==1.5.3 +highspy==1.8.1 # via hatch.envs.default httpcore==1.0.2 # via httpx @@ -253,6 +244,7 @@ numpy==1.26.2 # via # casadi # contourpy + # highspy # matplotlib # numdifftools # pandas @@ -439,11 +431,7 @@ terminado==0.18.0 tinycss2==1.2.1 # via nbconvert tomli==2.0.1 - # via - # hatch.envs.default - # coverage - # jupyterlab - # pytest + # via hatch.envs.default tornado==6.4 # via # ipykernel @@ -472,8 +460,6 @@ types-python-dateutil==2.8.19.14 # via arrow typing-extensions==4.9.0 # via - # anyio - # async-lru # pint # pydantic # pydantic-core @@ -488,7 +474,9 @@ vcrpy==5.1.0 wcwidth==0.2.12 # via prompt-toolkit webcolors==1.13 - # via jsonschema + # via + # hatch.envs.default + # jsonschema webencodings==0.5.1 # via # bleach diff --git a/requirements/requirements-docs.txt 
b/requirements/requirements-docs.txt index 457a7ce..91792ca 100644 --- a/requirements/requirements-docs.txt +++ b/requirements/requirements-docs.txt @@ -1,7 +1,7 @@ # -# This file is autogenerated by hatch-pip-compile with Python 3.10 +# This file is autogenerated by hatch-pip-compile with Python 3.12 # -# [constraints] requirements.txt (SHA256: 0c8899916e6b15d62ed3518e3fbe022d59cf03e2a2b595d8f9b2413ab54cbb6d) +# [constraints] requirements.txt (SHA256: 7e49353888764b21a9ef18e235de44a7d9d193b8ceb4aa5acebdbb8473c809fd) # # - mkdocs~=1.5 # - mkdocs-material~=9.4 @@ -113,11 +113,6 @@ et-xmlfile==1.1.0 # via # -c requirements.txt # openpyxl -exceptiongroup==1.2.0 - # via - # -c requirements.txt - # anyio - # ipython executing==2.0.1 # via # -c requirements.txt @@ -161,7 +156,7 @@ h11==0.14.0 # via # -c requirements.txt # httpcore -highspy==1.5.3 +highspy==1.8.1 # via # -c requirements.txt # hatch.envs.docs @@ -308,6 +303,7 @@ numpy==1.26.2 # -c requirements.txt # casadi # contourpy + # highspy # matplotlib # numdifftools # pandas @@ -521,7 +517,6 @@ traitlets==5.14.0 typing-extensions==4.9.0 # via # -c requirements.txt - # anyio # pint # pydantic # pydantic-core diff --git a/requirements/requirements-lint.txt b/requirements/requirements-lint.txt index 7a43ced..e33e744 100644 --- a/requirements/requirements-lint.txt +++ b/requirements/requirements-lint.txt @@ -1,7 +1,7 @@ # -# This file is autogenerated by hatch-pip-compile with Python 3.10 +# This file is autogenerated by hatch-pip-compile with Python 3.12 # -# [constraints] requirements.txt (SHA256: 0c8899916e6b15d62ed3518e3fbe022d59cf03e2a2b595d8f9b2413ab54cbb6d) +# [constraints] requirements.txt (SHA256: 7e49353888764b21a9ef18e235de44a7d9d193b8ceb4aa5acebdbb8473c809fd) # # - mypy~=1.7 # - ruff~=0.1.14 @@ -77,11 +77,6 @@ et-xmlfile==1.1.0 # via # -c requirements.txt # openpyxl -exceptiongroup==1.2.0 - # via - # -c requirements.txt - # anyio - # ipython executing==2.0.1 # via # -c requirements.txt @@ -117,7 +112,7 @@ 
h11==0.14.0 # via # -c requirements.txt # httpcore -highspy==1.5.3 +highspy==1.8.1 # via # -c requirements.txt # hatch.envs.lint @@ -192,6 +187,7 @@ numpy==1.26.2 # -c requirements.txt # casadi # contourpy + # highspy # matplotlib # numdifftools # pandas @@ -357,7 +353,6 @@ tomli==2.0.1 # via # -c requirements.txt # hatch.envs.lint - # mypy tqdm==4.66.1 # via # -c requirements.txt @@ -372,7 +367,6 @@ traitlets==5.14.0 typing-extensions==4.9.0 # via # -c requirements.txt - # anyio # mypy # pint # pydantic From 5fa81106839da3d18672a43b24cbdd28a98aeb9a Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Mon, 16 Dec 2024 10:41:26 +0000 Subject: [PATCH 04/12] add steps to setup devcontainer environment for local development --- README.md | 23 ++++++++++++++++++++--- pyproject.toml | 1 + 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d05b119..95a4488 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,21 @@ Move it to the `~/.pytanis` folder as `client_secret.json`. Also make sure to se ## Development -This section is only relevant if you want to contribute to Pytanis itself. Your help is highly appreciated! +This section is only relevant if you want to contribute to Pytanis itself. Your help is highly appreciated! There are two options for local development. + +Whilst both option are valid, the Devcontainer setup is the most convenient, as all dependencies are preconfigured. + +### Devcontainer Setup + +After having cloned this repository: + +1. Make sure to have a local installation of [Docker] and [VS Code] running. +2. Open [VS Code] and make sure to have the [Dev Containers Extension] from Microsoft installed. +3. Open the cloned project in [VS Code] and from the bottom right corner confirm to open the project to be opened within the Devcontainer. + +If you miss any dependencies check out the `devcontainer.json` within the `.devcontainer` folder. 
Otherwise, the right python environment with [pipx], [hatch], [pre-commit] and [hatch-pip-compile] as well as the initialization step for the Hatch environments are already included. + +### Conventional Setup After having cloned this repository: @@ -93,8 +107,9 @@ After having cloned this repository: 2. install [pre-commit] globally, e.g. `pipx install pre-commit`, 3. \[only once\] run `hatch config set dirs.env.virtual .direnv` to let [VS Code] find your virtual environments. -and then you are already set up to start hacking. Use `hatch run` to do everything you would normally do in a virtual -environment, e.g. `hatch run juptyer lab` to start [JupyterLab] in the default environment, `hatch run cov` for unit tests + +And then you are already set up to start hacking. Use `hatch run` to do everything you would normally do in a virtual +environment, e.g. `hatch run jupyter lab` to start [JupyterLab] in the default environment, `hatch run cov` for unit tests and coverage (like [tox]) or `hatch run docs:serve` to build & serve the documentation. For code hygiene, execute `hatch run lint:all` in order to run [ruff] and [mypy] or `hatch run lint:fix` to automatically fix formatting issues. Check out the `[tool.hatch.envs]` sections in [pyproject.toml](pyproject.toml) to learn about other commands. 
@@ -139,3 +154,5 @@ To start this project off a lot of inspiration and code was taken from [Alexande [ruff]: https://github.com/astral-sh/ruff [VS Code]: https://code.visualstudio.com/ [LiveChat]: https://www.livechat.com/ +[Docker]: https://www.docker.com/ +[Dev Containers Extension]: https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers diff --git a/pyproject.toml b/pyproject.toml index 57ba12a..2b47106 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -241,6 +241,7 @@ cov = "pytest --cov-report=term-missing --cov-config=pyproject.toml --cov=src/py no-cov = "cov --no-cov {args}" debug = "cov --no-cov -s --pdb --pdbcls=IPython.core.debugger:Pdb {args}" ci = "cov --cov-report lcov {args}" +notebooks = "jupyter lab --allow-root" # Docs environment [tool.hatch.envs.docs] From ec1e8c3f75e6dd364dc415b6490383216ccf7b21 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Wed, 18 Dec 2024 14:35:04 +0000 Subject: [PATCH 05/12] init stats --- .devcontainer/devcontainer.json | 2 +- .devcontainer/init.sh | 45 ++++ .gitignore | 3 +- .pre-commit-config.yaml | 1 + README.md | 2 + .../10_submission_stats_v1.ipynb | 251 ++++++++++++++++++ pyproject.toml | 1 + 7 files changed, 303 insertions(+), 2 deletions(-) create mode 100644 .devcontainer/init.sh create mode 100644 notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 4cc4af1..c745f9f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -16,7 +16,7 @@ "jupyter.jupyterServerType": "local" } }, - "postCreateCommand": "apt update && apt install -y pipx && pipx install hatch && pipx install pre-commit && pipx install hatch-pip-compile && hatch config set dirs.env.virtual .direnv", + "postCreateCommand": "bash .devcontainer/init.sh", "mounts": [ "type=bind,source=${localEnv:HOME}/.ssh,target=/root/.ssh" ], diff --git a/.devcontainer/init.sh b/.devcontainer/init.sh new 
file mode 100644 index 0000000..618da4a --- /dev/null +++ b/.devcontainer/init.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -e # Terminates the script in case of errors +set -u # Terminates the script if an unset variable is used +set -o pipefail # Terminates the script if a command in a pipe fails + +# Update and installation of pipx +apt update && apt install -y pipx + +# Installations with pipx +pipx install hatch +pipx install pre-commit +pipx install hatch-pip-compile + +# Configure Hatch +hatch config set dirs.env.virtual .direnv + +# .pytanis directory and config.toml file (see README) +DIR="$HOME/.pytanis" +FILE="$DIR/config.toml" + +# Check if the file exists +if [ ! -f "$FILE" ]; then + # Create the directory if it doesn't exist + mkdir -p "$DIR" + + # Write the content to the file + cat > "$FILE" < Date: Wed, 18 Dec 2024 14:35:20 +0000 Subject: [PATCH 06/12] init stats --- .pre-commit-config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f9ca0b9..3de3dfc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,6 @@ repos: rev: 'v0.6.9' # make sure this is always consistent with hatch configs hooks: - id: ruff - exclude: "notebooks/.*\\.ipynb$" - id: ruff-format args: [--check, --config, ./pyproject.toml] From 758b147ef47ab3a71c985dd4e4cf01145d452b21 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Sun, 22 Dec 2024 10:54:35 +0000 Subject: [PATCH 07/12] add histograms and export --- .../10_submission_stats_v1.ipynb | 160 +++++++++++++----- 1 file changed, 119 insertions(+), 41 deletions(-) diff --git a/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb b/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb index d9c2573..3c062ca 100644 --- a/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb +++ b/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb @@ -106,9 +106,7 @@ "metadata": {}, "outputs": [], "source": [ 
- "# filter subs_df for the talks that have the submitted state\n", - "talks_df = subs_df[subs_df.State == 'submitted']\n", - "talks_df.head(2)\n" + "subs_df.head(2)" ] }, { @@ -117,7 +115,17 @@ "metadata": {}, "outputs": [], "source": [ - "# Alle available tracks\n", + "# filter for all submitted talks\n", + "talks_df = subs_df.loc[subs_df['State'] == 'submitted']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "main_tracks = ['PyData', 'PyCon', 'General']\n", "all_tracks = ['PyCon: MLOps & DevOps', 'PyCon: Programming & Software Engineering', 'PyCon: Python Language & Ecosystem', 'PyCon: Security', 'PyCon: Testing', 'PyCon: Django & Web', 'PyData: Data Handling & Data Engineering', 'PyData: Machine Learning & Deep Learning & Statistics', 'PyData: Natural Language Processing & Audio (incl. Generative AI NLP)', 'PyData: Computer Vision (incl. Generative AI CV)', 'PyData: Generative AI', 'PyData: Embedded Systems & Robotics', 'PyData: PyData & Scientific Libraries Stack', 'PyData: Visualisation & Jupyter', 'PyData: Research Software Engineering', 'General: Community & Diversity', 'General: Education, Career & Life', 'General: Ethics & Privacy', 'General: Infrastructure - Hardware & Cloud', 'General: Others']\n", "\n", "# all available submission types\n", @@ -128,33 +136,17 @@ "expertise_levels = list(set(expertise_levels))\n", "\n", "# all expertise categories\n", - "expertise_categories = ['Q: Expected audience expertise: Python', 'Q: Expected audience expertise: Domain\t']" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ + "expertise_categories = ['Q: Expected audience expertise: Python', 'Q: Expected audience expertise: Domain']\n", + "\n", "# create an dataframe with 'all_tracks' and all 'submission_types' as rows\n", "tracks_df = pd.DataFrame(all_tracks, columns=['Track'])" ] }, - { - "cell_type": "code", - "execution_count": null, 
- "metadata": {}, - "outputs": [], - "source": [ - "submission_types" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Stats for Talks" + "### All independent of submission type" ] }, { @@ -164,34 +156,34 @@ "outputs": [], "source": [ "# group submittaded talks by track and count the number of submissions\n", - "talks_quantification_by_domain_expertise = talks_df.loc[talks_df['Submission type'] == 'Talk'].groupby(['Track', 'Q: Expected audience expertise: Domain']).size().unstack(fill_value=0)\n", + "talks_quantification_by_domain_expertise = talks_df.groupby(['Track', 'Q: Expected audience expertise: Domain']).size().unstack(fill_value=0)\n", "talks_quantification_by_domain_expertise = tracks_df.join(talks_quantification_by_domain_expertise, on='Track')\n", - "talks_quantification_by_domain_expertise = talks_quantification_by_domain_expertise[['Track', 'None', 'Novice', 'Intermediate', 'Advanced']]\n", - "# add total number of submissions\n", "talks_quantification_by_domain_expertise['Total'] = talks_quantification_by_domain_expertise[['None', 'Novice', 'Intermediate', 'Advanced']].sum(axis=1)\n", + "talks_quantification_by_domain_expertise['Total %'] = (talks_quantification_by_domain_expertise['Total'] / talks_quantification_by_domain_expertise['Total'].sum() * 100).round(2)\n", + "talks_quantification_by_domain_expertise['Main Track'] = talks_quantification_by_domain_expertise['Track'].apply(lambda x: x.split(':')[0] if ':' in x else x)\n", + "talks_quantification_by_domain_expertise['Total % per Main Track'] = talks_quantification_by_domain_expertise.groupby('Main Track')['Total'].transform(lambda x: (x / x.sum() * 100).round(2))\n", "\n", - "talks_quantification_by_python_expertise = talks_df.loc[talks_df['Submission type'] == 'Talk'].groupby(['Track', 'Q: Expected audience expertise: Python']).size().unstack(fill_value=0)\n", + "# reorder columns\n", + "talks_quantification_by_domain_expertise = 
talks_quantification_by_domain_expertise[['Main Track', 'Track', 'Total', 'Total %', 'Total % per Main Track', 'None', 'Novice', 'Intermediate', 'Advanced']]\n", + "talks_quantification_by_python_expertise = talks_df.groupby(['Track', 'Q: Expected audience expertise: Python']).size().unstack(fill_value=0)\n", "talks_quantification_by_python_expertise = tracks_df.join(talks_quantification_by_python_expertise, on='Track')\n", - "talks_quantification_by_python_expertise = talks_quantification_by_python_expertise[['Track', 'None', 'Novice', 'Intermediate', 'Advanced']]\n", - "talks_quantification_by_python_expertise['Total'] = talks_quantification_by_python_expertise[['None', 'Novice', 'Intermediate', 'Advanced']].sum(axis=1)\n", - "\n", + "talks_quantification_by_python_expertise['Main Track'] = talks_quantification_by_python_expertise['Track'].apply(lambda x: x.split(':')[0] if ':' in x else x)\n", + "talks_quantification_by_python_expertise = talks_quantification_by_python_expertise[['Main Track', 'Track', 'None', 'Novice', 'Intermediate', 'Advanced']]\n", "\n", "# join talks_quantification_by_domain_expertise and talks_quantification_by_python_expertise and keep add a group column name fir the expertise level\n", - "talks_quantification = pd.merge(talks_quantification_by_domain_expertise, talks_quantification_by_python_expertise, on='Track', how='outer')\n", + "talks_quantification = pd.merge(talks_quantification_by_domain_expertise, talks_quantification_by_python_expertise, on=['Main Track', 'Track'], how='outer')\n", "\n", "talks_quantification.columns = pd.MultiIndex.from_tuples([\n", - " ('', col) if col == 'Track' else \n", + " ('', col) if (col == 'Track') | (col == 'Total') | (col == 'Total %') | (col == 'Total % per Main Track') | (col == 'Main Track') else \n", " ('Expected Domain Expertise by Audience', col.rstrip(\"_xy\")) if col.endswith('_x') else \n", " ('Expected Python Expertise by Audience', col.rstrip(\"_xy\")) \n", " for col in 
talks_quantification.columns\n", - " \n", "])\n", "\n", + "# fill NaN values with 0\n", "talks_quantification.fillna(0, inplace=True)\n", - "# convert float columns to integer\n", - "talks_quantification = talks_quantification.astype({col: int for col in talks_quantification.columns if col[1] != 'Track'})\n", "\n", - "talks_quantification\n" + "talks_quantification" ] }, { @@ -200,16 +192,92 @@ "metadata": {}, "outputs": [], "source": [ - "talks_quantification_by_domain_expertise" + "# Compress overall table for plotting\n", + "talks_quantification_condensed = talks_quantification.copy()\n", + "\n", + "talks_quantification_condensed['', 'Expected Domain Expertise by Audience'] = talks_quantification_condensed['Expected Domain Expertise by Audience'].to_numpy().tolist()\n", + "talks_quantification_condensed['', 'Expected Python Expertise by Audience'] = talks_quantification_condensed['Expected Python Expertise by Audience'].to_numpy().tolist()\n", + "\n", + "talks_quantification_condensed = talks_quantification_condensed.drop(columns=['Expected Domain Expertise by Audience', 'Expected Python Expertise by Audience'], level=0)\n", + "talks_quantification_condensed.columns = talks_quantification_condensed.columns.droplevel(0)\n", + "\n", + "# helper functions for plotting\n", + "def cell_histogram_with_labels(values, global_max_value=None):\n", + " max_value = max(values) if global_max_value is None else global_max_value # Maximalwert für Skalierung\n", + " bar_heights = [100 / len(values)] * len(values) # Gleichmäßige Balkenhöhen (in Prozent)\n", + " bars = \"\"\n", + " labels = ['None', 'Novice', 'Intermediate', 'Advanced']\n", + " for i, value in enumerate(values):\n", + " label = labels[i]\n", + " bar_width = (value / max_value) * 100 if max_value > 0 else 0 # Width\n", + " y_position = i * bar_heights[0] # Y-Position of each bar\n", + " # Rechteck (Bar)\n", + " bars += f''\n", + " # Text (Label)\n", + " bars += f'{label} ({int(value)})'\n", + " \n", + " svg = 
f\"\"\"\n", + " \n", + " {bars}\n", + " \n", + " \"\"\"\n", + " return svg\n", + "\n", + "def single_value_histogram(value, max_value):\n", + " # Calculate the width of the bar as a percentage\n", + " bar_width = (value / max_value) * 100 if max_value > 0 else 0\n", + " \n", + " # Generate the SVG\n", + " svg = f\"\"\"\n", + " \n", + " \n", + " \n", + " \n", + " {round(value, 2)}%\n", + " \n", + " \"\"\"\n", + " return svg\n", + "\n", + "# Generate output\n", + "title = f'All {int(talks_quantification_condensed['Total'].sum())} submitted talks, long talks and tutorials (excluding pending submissions)
****'\n", + "\n", + "talks_quantification_condensed_styled = talks_quantification_condensed.style \\\n", + " .set_caption(title) \\\n", + " .set_table_styles([\n", + " {'selector': 'caption', 'props': [('font-family', 'Arial'), ('font-size', '20px'), ('font-weight', 'bold')]},\n", + " {'selector': 'th', 'props': [('font-family', 'Arial'), ('max-width', '160px')]}\n", + " ]) \\\n", + " .set_properties(**{'font-family': 'Arial'}) \\\n", + " .format({\n", + " ('Total'): '{:.0f}',\n", + " ('Total %'): lambda value: single_value_histogram(\n", + " value,\n", + " talks_quantification_condensed['Total %'].max()\n", + " ),\n", + " ('Total % per Main Track'): lambda value: single_value_histogram(\n", + " value,\n", + " talks_quantification_condensed['Total % per Main Track'].max()\n", + " ),\n", + " 'Expected Domain Expertise by Audience': lambda values: cell_histogram_with_labels(\n", + " values,\n", + " np.concatenate(talks_quantification_condensed['Expected Domain Expertise by Audience'].to_numpy()).max()\n", + " ),\n", + " 'Expected Python Expertise by Audience': lambda values: cell_histogram_with_labels(\n", + " values,\n", + " np.concatenate(talks_quantification_condensed['Expected Python Expertise by Audience'].to_numpy()).max()\n", + " ),\n", + " })\n", + "\n", + "talks_quantification_condensed_styled.to_html('talks_quantification.html', index=False, escape=False)\n", + "\n", + "talks_quantification_condensed_styled" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "talks_quantification_by_python_expertise" + "### Stats for Talks" ] }, { @@ -225,6 +293,16 @@ "source": [ "### Stats for Talks (long)" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Future Todos\n", + "- Compare against historical events\n", + "- Split by submission type\n", + "- make independent of submission type" + ] } ], "metadata": { From c7276609027dfa5ac07eac52ca2a1f8d6f0d2832 Mon Sep 17 
00:00:00 2001 From: Nils Finke Date: Fri, 27 Dec 2024 11:05:40 +0000 Subject: [PATCH 08/12] cleanup notebook --- .../10_submission_stats_v1.ipynb | 52 ++++--------------- 1 file changed, 10 insertions(+), 42 deletions(-) diff --git a/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb b/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb index 3c062ca..933058e 100644 --- a/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb +++ b/notebooks/pyconde-pydata-darmstadt-2025/10_submission_stats_v1.ipynb @@ -100,15 +100,6 @@ "spkrs_df = speakers_as_df(spkrs, with_questions=True)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "subs_df.head(2)" - ] - }, { "cell_type": "code", "execution_count": null, @@ -116,17 +107,13 @@ "outputs": [], "source": [ "# filter for all submitted talks\n", - "talks_df = subs_df.loc[subs_df['State'] == 'submitted']" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ + "talks_df = subs_df.loc[subs_df['State'] == 'submitted']\n", + "\n", + "# TODO: fetch main tracks from pretalx\n", "main_tracks = ['PyData', 'PyCon', 'General']\n", - "all_tracks = ['PyCon: MLOps & DevOps', 'PyCon: Programming & Software Engineering', 'PyCon: Python Language & Ecosystem', 'PyCon: Security', 'PyCon: Testing', 'PyCon: Django & Web', 'PyData: Data Handling & Data Engineering', 'PyData: Machine Learning & Deep Learning & Statistics', 'PyData: Natural Language Processing & Audio (incl. Generative AI NLP)', 'PyData: Computer Vision (incl. 
Generative AI CV)', 'PyData: Generative AI', 'PyData: Embedded Systems & Robotics', 'PyData: PyData & Scientific Libraries Stack', 'PyData: Visualisation & Jupyter', 'PyData: Research Software Engineering', 'General: Community & Diversity', 'General: Education, Career & Life', 'General: Ethics & Privacy', 'General: Infrastructure - Hardware & Cloud', 'General: Others']\n", + "\n", + "# TODO: fetch tracks from pretalx\n", + "all_tracks = ['PyCon: MLOps & DevOps', 'PyCon: Programming & Software Engineering', 'PyCon: Python Language & Ecosystem', 'PyCon: Security', 'PyCon: Testing', 'PyCon: Django & Web', 'PyData: Data Handling & Engineering', 'PyData: Machine Learning & Deep Learning & Statistics', 'PyData: Natural Language Processing & Audio (incl. Generative AI NLP)', 'PyData: Computer Vision (incl. Generative AI CV)', 'PyData: Generative AI', 'PyData: Embedded Systems & Robotics', 'PyData: PyData & Scientific Libraries Stack', 'PyData: Visualisation & Jupyter', 'PyData: Research Software Engineering', 'General: Community & Diversity', 'General: Education, Career & Life', 'General: Ethics & Privacy', 'General: Infrastructure - Hardware & Cloud', 'General: Rust', 'General: Others']\n", "\n", "# all available submission types\n", "submission_types = talks_df['Submission type'].unique()\n", @@ -239,7 +226,7 @@ " return svg\n", "\n", "# Generate output\n", - "title = f'All {int(talks_quantification_condensed['Total'].sum())} submitted talks, long talks and tutorials (excluding pending submissions)
****'\n", + "title = f'All {int(talks_quantification_condensed['Total'].sum())} submitted talks, long talks and tutorials (excluding pending and withdrawn submissions)
****'\n", "\n", "talks_quantification_condensed_styled = talks_quantification_condensed.style \\\n", " .set_caption(title) \\\n", @@ -273,27 +260,6 @@ "talks_quantification_condensed_styled" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Stats for Talks" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Stats for Tutorials" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Stats for Talks (long)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -301,7 +267,9 @@ "# Future Todos\n", "- Compare against historical events\n", "- Split by submission type\n", - "- make independent of submission type" + "- Make independent of submission type\n", + "- Topic modelling and keyword freqzency on submissions\n", + "- Fetch tracks from Pretalx automatically (no static list as above)" ] } ], From 7e3fd76314cf48cb8231bd26caa122883f3722c4 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Tue, 17 Dec 2024 07:12:18 +0000 Subject: [PATCH 09/12] init mailgun setup --- src/pytanis/mailgun/__init__.py | 5 ++ src/pytanis/mailgun/mail.py | 84 +++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 src/pytanis/mailgun/__init__.py create mode 100644 src/pytanis/mailgun/mail.py diff --git a/src/pytanis/mailgun/__init__.py b/src/pytanis/mailgun/__init__.py new file mode 100644 index 0000000..3ebf0ad --- /dev/null +++ b/src/pytanis/mailgun/__init__.py @@ -0,0 +1,5 @@ +"""Functionality around the Mailgun API""" + +from pytanis.mailgun.mail import Mail, MailClient, Recipient + +__all__ = ['Mail', 'MailClient', 'Recipient'] diff --git a/src/pytanis/mailgun/mail.py b/src/pytanis/mailgun/mail.py new file mode 100644 index 0000000..f5f5aeb --- /dev/null +++ b/src/pytanis/mailgun/mail.py @@ -0,0 +1,84 @@ +import time + +from pydantic import BaseModel, ConfigDict, validator +from tqdm.auto import tqdm + + +class MetaData(BaseModel): + """Additional, arbitrary metadata provided by the 
user like for template filling""" + + model_config = ConfigDict(extra='allow') + + +class Recipient(BaseModel): + """Details about the recipient + + Use the `data` field to store additional information + """ + + name: str + email: str + address_as: str | None = None # could be the first name + data: MetaData | None = None + + # TODO[pydantic]: We couldn't refactor the `validator`, please replace it by `field_validator` manually. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. + @validator('address_as') + @classmethod + def fill_with_name(cls, v, values): + if v is None: + v = values['name'] + return v + + +class Mail(BaseModel): + """Mail template + + Use the `data` field to store additional information + + You can use the typical [Format String Syntax] and the objects `recipient` and `mail` + to access metadata to complement the template, e.g.: + + ``` + Hello {recipient.address_as}, + + We hope it's ok to address you your first name rather than using your full name being {recipient.name}. + Have you read the email's subject '{mail.subject}'? How is your work right now at {recipient.data.company}? + + Cheers! + ``` + + [Format String Syntax]: https://docs.python.org/3/library/string.html#formatstrings + """ + + subject: str + body: str + recipients: list[Recipient] + data: MetaData | None = None + + +class MailClient: + """Mail client for mass mails via Mailgun""" + + batch_size: int = 10 # n messages are a batch + wait_time: int = 20 # wait time after eacht batch before next + + def __init__(self): + # TODO: add instantiation for Mailclient? 
+ pass + + # TODO: Check return type of mail + def send(self, mail: Mail): + """Send a mail to all recipients using Mailgun""" + errors = [] + status_msg = None + for idx, recipient in enumerate(tqdm(mail.recipients), start=1): + try: + pass + except Exception as e: + errors.append((recipient, e)) + + if idx % self.batch_size == 0: + time.sleep(self.wait_time) + + return status_msg, errors From 7323cd2d1ab8813062d0bb64dc6534fb5d40f862 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Fri, 27 Dec 2024 13:30:18 +0000 Subject: [PATCH 10/12] Mailgun send mail implementation --- README.md | 5 +++++ src/pytanis/config.py | 9 ++++++++ src/pytanis/mailgun/mail.py | 45 ++++++++++++++++++++++++++++++++----- 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b267f18..dd91aa0 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,11 @@ service_user_authentication = false account = "934jcjkdf-39df-9df-93kf-934jfhuuij39fd" entity_id = "email@host.com" token = "dal:Sx4id934C3Y-X934jldjdfjk" + +[Mailgun] +token = "gguzdgshbdhjsb87239njsa" +from_address = "PyCon DE & PyData Program Committee " +reply_to = "program25@pycon.de" ``` where you need to replace the dummy values in the sections `[Pretalx]` and `[HelpDesk]` accordingly. Note that `service_user_authentication` is not required to be set if authentication via a service user is not necessary (see [GSpread using Service Account] for more details). 
diff --git a/src/pytanis/config.py b/src/pytanis/config.py index 3f95117..885eb6d 100644 --- a/src/pytanis/config.py +++ b/src/pytanis/config.py @@ -29,6 +29,14 @@ class HelpDeskCfg(BaseModel): token: str | None = None +class MailgunCfg(BaseModel): + """Configuration related to the Mailgun API""" + + token: str | None = None + from_address: str | None = None + reply_to: str | None = None + + class PretalxCfg(BaseModel): """Configuration related to the Pretalx API""" @@ -43,6 +51,7 @@ class Config(BaseModel): Pretalx: PretalxCfg Google: GoogleCfg HelpDesk: HelpDeskCfg + Mailgun: MailgunCfg @field_validator('Google') @classmethod diff --git a/src/pytanis/mailgun/mail.py b/src/pytanis/mailgun/mail.py index f5f5aeb..6d23604 100644 --- a/src/pytanis/mailgun/mail.py +++ b/src/pytanis/mailgun/mail.py @@ -1,8 +1,11 @@ import time +import requests from pydantic import BaseModel, ConfigDict, validator from tqdm.auto import tqdm +from pytanis.config import Config, get_cfg + class MetaData(BaseModel): """Additional, arbitrary metadata provided by the user like for template filling""" @@ -62,23 +65,53 @@ class MailClient: batch_size: int = 10 # n messages are a batch wait_time: int = 20 # wait time after eacht batch before next + timeout: int = 10 # timeout for requests in seconds - def __init__(self): - # TODO: add instantiation for Mailclient? 
- pass + def __init__(self, config: Config | None = None): + if config is None: + config = get_cfg() + self._config = config # TODO: Check return type of mail def send(self, mail: Mail): """Send a mail to all recipients using Mailgun""" errors = [] - status_msg = None + responses = [] + + # TODO: improve Mailgun batch mailing by setting custom transactional variables for idx, recipient in enumerate(tqdm(mail.recipients), start=1): try: - pass + recipient_mail = mail.model_copy() + if self._config.Mailgun.token is None: + msg = 'API token for Mailgun is empty' + raise RuntimeError(msg) + if self._config.Mailgun.from_address is None: + msg = 'From Email for Mailgun is empty' + raise RuntimeError(msg) + if self._config.Mailgun.reply_to is None: + msg = 'Reply To Email for Mailgun is empty' + raise RuntimeError(msg) + + response = requests.post( + 'https://api.eu.mailgun.net/v3/mg.pycon.de/messages', + auth=('api', self._config.Mailgun.token), + data={ + 'to': [recipient.email], + 'from': self._config.Mailgun.from_address, + 'subject': recipient_mail.subject.format(recipient=recipient, mail=mail), + 'text': recipient_mail.body.format(recipient=recipient, mail=mail), + 'h:Reply-To': self._config.Mailgun.reply_to, + }, + timeout=self.timeout, + ) + # check response status message and throw exception if not 200 + response.raise_for_status() except Exception as e: errors.append((recipient, e)) + else: + responses.append(response) if idx % self.batch_size == 0: time.sleep(self.wait_time) - return status_msg, errors + return responses, errors From 4e9da5e8e3f4ab4ffcfc3cb179785745cafb1465 Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Fri, 27 Dec 2024 13:44:11 +0000 Subject: [PATCH 11/12] update devcontainer init script --- .devcontainer/init.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.devcontainer/init.sh b/.devcontainer/init.sh index 618da4a..52400cf 100644 --- a/.devcontainer/init.sh +++ b/.devcontainer/init.sh @@ -38,6 +38,11 @@ service_user_authentication 
= false account = "" entity_id = "" token = "" + +[Mailgun] +token = "" +from_address = "" +reply_to = "" EOF echo "File created at $FILE" else From 88842d27309057816631a9e872b6b2e7b58fc51f Mon Sep 17 00:00:00 2001 From: Nils Finke Date: Sat, 28 Dec 2024 16:27:21 +0000 Subject: [PATCH 12/12] init mail to reviewers notebook --- .../20_mail_to_reviewers_v1.ipynb | 912 ++++++++++++++++++ 1 file changed, 912 insertions(+) create mode 100644 notebooks/pyconde-pydata-darmstadt-2025/20_mail_to_reviewers_v1.ipynb diff --git a/notebooks/pyconde-pydata-darmstadt-2025/20_mail_to_reviewers_v1.ipynb b/notebooks/pyconde-pydata-darmstadt-2025/20_mail_to_reviewers_v1.ipynb new file mode 100644 index 0000000..126e651 --- /dev/null +++ b/notebooks/pyconde-pydata-darmstadt-2025/20_mail_to_reviewers_v1.ipynb @@ -0,0 +1,912 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import sys\n", + "import math\n", + "import logging\n", + "import structlog\n", + "from pathlib import Path\n", + "\n", + "import tomli\n", + "import numpy as np\n", + "\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import matplotlib as mpl\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "%config InlineBackend.figure_format = 'retina'\n", + "\n", + "import seaborn as sns\n", + "sns.set_context(\"poster\")\n", + "sns.set(rc={\"figure.figsize\": (16, 9.)})\n", + "sns.set_style(\"whitegrid\")\n", + "\n", + "import pandas as pd\n", + "pd.set_option(\"display.max_rows\", 120)\n", + "pd.set_option(\"display.max_columns\", 120)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set the logging level\n", + "logging.basicConfig(level=logging.INFO, stream=sys.stdout)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pytanis\n", + "from pytanis import 
GSheetsClient, PretalxClient, HelpDeskClient\n", + "from pytanis.review import Col\n", + "from pytanis.mailgun import Mail, Recipient, MailClient" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Be aware that this notebook might only run with the following version\n", + "pytanis.__version__ " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import event-specific settings to don't have them here in the notebook\n", + "with open('config.toml', 'rb') as fh:\n", + " cfg = tomli.load(fh)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Get all the Reviewers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gsheet_client = GSheetsClient()\n", + "gsheet_df = gsheet_client.gsheet_as_df(cfg['reviewer_spread_id'], cfg['reviewer_work_name'])\n", + "# rename columns to stick to our convention\n", + "col_map = {\n", + " \"Topics you want to review\": Col.track_prefs,\n", + " \"Email address\": Col.email,\n", + " \"Name\": Col.speaker_name,\n", + " \"Affiliation\": Col.affiliation,\n", + " \"Who do you know from the Committee?\": Col.committee_contact,\n", + " \"Availability during the Review Period\": Col.availability,\n", + " \"Additional comments regarding your availability during the review period.\": Col.availability_comment,\n", + " \"Activated in Pretalx\": Col.pretalx_activated,\n", + " \"Do you want your name to be listed as a reviewer on the conference website?\": Col.public,\n", + " \"Wants all proposals\": Col.all_proposals,\n", + " \"Any additional comments for the Program Committee\": Col.comment,\n", + " \"Committee Member\": Col.committee_member\n", + "}\n", + "gsheet_df.rename(columns=col_map, inplace=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# add column 
to address people nicely\n", + "gsheet_df[Col.address_as] = gsheet_df[Col.speaker_name].apply(lambda x: x.split()[0].title())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reviewers_all_df = gsheet_df[[Col.speaker_name, Col.email, Col.address_as]]\n", + "reviewers_all = reviewers_all_df.apply(lambda x: Recipient(name=x[Col.speaker_name], email=x[Col.email], address_as=x[Col.address_as]), axis=1).to_list()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# determine reviewers having not even activated the Pretalx Acccount \n", + "reviewers_not_activated_df = gsheet_df.loc[gsheet_df[Col.pretalx_activated].isna(), [Col.speaker_name, Col.email, Col.address_as]]\n", + "reviewers_not_activated = reviewers_not_activated_df.apply(lambda x: Recipient(name=x[Col.speaker_name], email=x[Col.email], address_as=x[Col.address_as]), axis=1).to_list()\n", + "\n", + "# TODO: add Committee Member as column to the Reviewers spreadsheet in the Google Drive \n", + "# NOTE: in the last years we have as well excluded Committee members, which I have not done here as I don't have the information in the Spreadsheet (and I believe sending emails to the committee members is not a problem anyways)\n", + "# & gsheet_df[Col.committee_member].isna()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: add Pretalx Email as column to the spreadsheet in the Google Drive \n", + "# for activated reviewers we take the e-mail address of their pretalx account\n", + "reviewers_activated_df = gsheet_df.loc[gsheet_df[Col.pretalx_activated].notnull()] # , [Col.speaker_name, 'Pretalx Mail', Col.address_as]\n", + "reviewers_activated = reviewers_activated_df.apply(lambda x: Recipient(name=x[Col.speaker_name], email=x['Pretalx Mail'], address_as=x[Col.address_as]), axis=1).to_list()" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "# Initial Mail to Reviewers for Onboarding" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as}!\n", + "\n", + "I hope this message finds you well. As the Chair of the Programme Committee for PyConDE & PyData,\n", + "it's my pleasure to welcome you to our team. Your contribution is vital to the success of our upcoming event.\n", + "\n", + "Today, you should have received an invitation from noreply@pretalx.com to join Pretalx, our platform for\n", + "managing conference submissions. Please follow the link in the email to activate your account and\n", + "ensure you keep your login credentials safe.\n", + "\n", + "We will be sending out more detailed information and guidelines in the new year.\n", + "In the meantime, if you encounter any issues signing up or have any questions,\n", + "feel free to reach out to us at program25@pycon.de.\n", + "\n", + "Until then, I wish you a joyful Christmas season and a fantastic start to the New Year!\n", + "\n", + "Warm regards,\n", + "\n", + "Florian Wilhelm\n", + "Program Committee Chair\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"Welcome to PyConDE & PyData 2025 Review Team - Important Account Activation Information\",\n", + " body=mail_body,\n", + " recipients=reviewers_not_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mail to Reviewers that haven't activated their account in Pretalx" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as} and welcome to the PyConDE / PyData 2025 review team!\n", + "\n", + "I hope this message finds you well. As the Chair of the Programme Committee for PyConDE & PyData,\n", + "it's my pleasure to welcome you to our team. Your contribution is vital to the success of our upcoming event.\n", + "\n", + "In the last few days, you should have received an invitation from noreply@pretalx.com to join Pretalx, our platform for\n", + "managing conference submissions. Have you checked your SPAM folder yet as it seems you haven't activated your\n", + "Pretalx Account for reviewing. Please follow the link in the Pretalx email to activate your account and ensure you keep\n", + "your login credentials safe. PLEASE DO THIS NOW :-)\n", + "\n", + "We will be sending out more detailed information and guidelines in the new year.\n", + "In the meantime, if you encounter any issues signing up, can't find the Pretalx email or have any questions,\n", + "feel free to reach out to us at program25@pycon.de.\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"Important! 
Please activate your Pretalx Account for PyConDE & PyData 2025\",\n", + " body=mail_body,\n", + " recipients=reviewers_not_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail regarding the Review Onboarding" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "the end of the year 2024 is near and we are all excited about what's to come in 2025.\n", + "The Programme Committee is doing right now the final preparations to organise the review process.\n", + "\n", + "We have just updated our Reviewer Guidelines (link below) for you to read before the actual review\n", + "process will start on Wednesday, 9th of January and the deadline is Friday, 31st of January Midnight.\n", + "To meet your fellow reviewers, have a nice chat, talk to the programme committee and ask questions\n", + "personally, we also offer two non-mandatory Get Togethers on the th & th of January (details below).\n", + "\n", + "If you have any questions, need help, don't hesitate contacting us at program25@pycon.de.\n", + "We appreciate your help very much and want to make sure that you are having a good time.\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Summary:\n", + "* Review period: 9. January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* [Nonobligatory] PyCon 2025 - Reviewer's Get Together, Meet & Greet and your Questions:\n", + " - Thursday, xx. 
January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "IMPORTANT: If you haven't signed up on Pretalx yet, please search for a mail from `noreply@pretalx.com`\n", + " in your mailbox, also SPAM folder and confirm it. It's necessary for our assignment of proposals for review.\n", + "\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] Information about the Review Process in 2025\",\n", + " body=mail_body,\n", + " recipients=reviewers_all\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail regarding the Review Onboarding 2 only to activated reviewers" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "we hope you had a great holiday season, after all you were nice and activated your Pretalx account,\n", + "so Santa must have been generous with you ;-) One last bit of information about the review process for\n", + "this year. More to come in 2025...\n", + "\n", + "We just assigned every reviewer initially 10 proposals so that you have a chance to familiarize yourself with\n", + "Pretalx before the Reviewer's Get Together dates (see below) to ask questions. It's *not* mandatory to participate\n", + "and the official review phase will start on January 9th in 2025. 
So this is just a tidbit of what's to come in 2025.\n", + "\n", + "See you next year and thank you very much {recipient.address_as} for your support!\n", + "\n", + "Summary:\n", + "* Review period: 9. January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* [Nonobligatory] PyCon 2025 - Reviewer's Get Together, Meet & Greet and your Questions:\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] One more thing about the Review Process in 2025\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Mail to Reviewers activated in Pretalx" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail regarding the Review Onboarding" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "the end of the year 2024 is near and we are all excited about what's to come in 2025.\n", + "The Programme Committee is doing right now the final 
preparations to organise the review process.\n", + "\n", + "We have just updated our Reviewer Guidelines (link below) for you to read before the actual review\n", + "process will start on Wednesday, 9th of January and the deadline is Friday, 31st of January Midnight.\n", + "To meet your fellow reviewers, have a nice chat, talk to the programme committee and ask questions\n", + "personally, we also offer two non-mandatory Get Togethers on the th & th of January (details below).\n", + "\n", + "If you have any questions, need help, don't hesitate contacting us at program25@pycon.de.\n", + "We appreciate your help very much and want to make sure that you are having a good time.\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Summary:\n", + "* Review period: 9. January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* [Nonobligatory] PyCon 2025 - Reviewer's Get Together, Meet & Greet and your Questions:\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "IMPORTANT: If you haven't signed up on Pretalx yet, please search for a mail from `noreply@pretalx.com`\n", + " in your mailbox, also SPAM folder and confirm it. 
It's necessary for our assignment of proposals for review.\n", + "\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] Information about the Review Process in 2025\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail regarding the start of the Review Phase" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "les jeux sont faits! The chips are down! and our Call for Proposals is now finally closed :-)\n", + "We are very happy to announce that we have now a total of proposals! That's even more than last year.\n", + "\n", + "So let's all begin with the review process and make this year's PyConDE & PyData Darmstadt 2025 a great success.\n", + "For this year's review process will try a new approach, simplified approach to last year's process.\n", + "We will assign each reviewer about proposals but expect everyone to review only at least proposals until\n", + "the end of January. 
So if you have time and want to review more proposals than , please feel free to do so.\n", + "This helps out another reviewer who couldn't complete proposals for unforeseen reasons.\n", + "If you want to know more, read the PS text of this mail.\n", + "\n", + "If you have any questions, need help, don't hesitate contacting us at program25@pycon.de.\n", + "We appreciate your help very much and want to make sure that you are having a good time.\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Summary:\n", + "* Review period: 9. January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* [Nonobligatory] PyCon 2025 - Reviewer's Get Together, Meet & Greet and your Questions:\n", + " - Thursday, xx. January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + " - Thursday, xx. 
January 2025 · 5:00 bis 5:40PM CET, video call: https://meet.google.com/\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\n", + "PS: In total, we are looking like last year for 3 reviews per proposal to get a fair evaluation of each proposal.\n", + "To make sure we really have 3 reviews per proposal in the end, we will initially assign each proposal to 5 reviewers.\n", + "Whenever a proposal has 3 reviews, it will be removed from the list of proposals to review for the remaining reviewers.\n", + "This is the reason why we define a minimum number of proposals to review for each reviewer but assign more proposals.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] Our Review Phase starts TODAY\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail after 1 week or reviews" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "the first of three weeks of the reviews phase are over and % of all reviewers have already started.\n", + "We have accomplished together already one third of all necessary reviews. That's great!\n", + "\n", + "So let's keep up the good work and finish the review process by the end of January.\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Information summary:\n", + "* Review period: 9. 
January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] First week of the Review Phase\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail after 2 week or reviews" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "the second of three weeks of the review phase are over and % of all reviewers have already started.\n", + "We have accomplished together already about two third of all necessary reviews. That's great!\n", + "\n", + "If you haven't started yet, please do so now. We are almost there! ONLY 9 DAYS LEFT.\n", + "So let's keep up the good work and finish the review process by the end of January.\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Information summary:\n", + "* Please review all assigned proposals but at least proposals :-)\n", + "* Review period: 9. 
January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] ONLY 9 DAYS LEFT!\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail after 3 week or reviews" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pretalx_client = PretalxClient()\n", + "n_reviews, reviews = pretalx_client.reviews(cfg['event_name'])\n", + "reviews = list(reviews)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "scored_reviews_df = pd.DataFrame([{\"user\": r.user, \"score\": r.score, \"n_reviews\": r.submission} for r in reviews if r.score is not None])\n", + "scored_reviews_df = scored_reviews_df.groupby(\"user\").count()[[\"n_reviews\"]]\n", + "scored_reviews_df['top_perc'] = (1. 
- scored_reviews_df.rank(pct=True)[\"n_reviews\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reviewers_activated_df = pd.merge(reviewers_activated_df, scored_reviews_df, right_on='user', left_on='Pretalx Name', how='left')\n", + "reviewers_activated_df[\"n_reviews\"].fillna(0., inplace=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_feedback(x):\n", + " if x['n_reviews'] == 0.:\n", + " return \"So far you haven't reviewed any proposals, it seems. Now it's really time to get started :-)\\nPlease let us know if you are not able to review for some reason. In this case, we must assign your proposals to others soon.\"\n", + " elif x['n_reviews'] < 20.:\n", + " return \"You have reviewed only a few proposals so far, less than 20. Please review some more proposals.\\nWe are close to the finish line.\"\n", + " else:\n", + " return f\"Thanks that you already supported us so much! 
We are close to the finish line.\\nIf you have time, please review some more proposals.\"\n", + "\n", + "reviewers_activated_df[\"feedback\"] = reviewers_activated_df.apply(get_feedback, axis=1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reviewers_activated = reviewers_activated_df.apply(lambda x: Recipient(name=x[Col.speaker_name], \n", + " email=x[Col.email], \n", + " address_as=x[Col.address_as], \n", + " data={\"feedback\": x[\"feedback\"]}), axis=1).to_list()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "it's the final countdown, only 5 days left until the end of the review phase\n", + "and yet we have more than % of all necessary reviews missing :-(\n", + "{recipient.data.feedback}\n", + "\n", + "Thank you very much, {recipient.address_as}, for your support!\n", + "\n", + "Information summary:\n", + "* Please review all assigned proposals but at least 22 proposals :-)\n", + "* Review period: 9. 
January 2025 - 31 January, 00:00 CET\n", + "* Pretalx: https://pretalx.com/orga/event/pyconde-pydata-2025/reviews/\n", + "* Reviewer Guidelines (https://docs.google.com/document/d/1zncTc8gm7OUIIWt175YohriJ5ux7yHkLJFBpQvwNyX8/edit?tab=t.0#heading=h.hfsau13gky5q)\n", + "* Contact program25@pycon.de for support if needed\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] ONLY 5 DAYS LEFT FOR YOUR REVIEWS!\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mail End of Review!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def get_feedback(x):\n", + " if x['n_reviews'] == 0.:\n", + " return \"It seems you couldn't review and if you had problems or technical reasons please let us know :-)\"\n", + " else:\n", + " return 'Thanks that you supported us so much! 
We are really happy about your contribution.'\n", + "\n", + "reviewers_activated_df[\"feedback\"] = reviewers_activated_df.apply(get_feedback, axis=1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reviewers_activated = reviewers_activated_df.apply(lambda x: Recipient(name=x[Col.speaker_name], \n", + " email=x[Col.email], \n", + " address_as=x[Col.address_as], \n", + " data={\"feedback\": x[\"feedback\"]}), axis=1).to_list()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_body = \"\"\"\n", + "Hi {recipient.address_as},\n", + "\n", + "it's done! The review phase is over and we have accomplished together a great job.\n", + "And we are finished even one day before the deadline :-)\n", + "\n", + "{recipient.data.feedback}\n", + "Together, more than reviewers wrote about reviews for roughly proposals.\n", + "That's amazing!\n", + "\n", + "All the best,\n", + "Program Committee\n", + "PyCon DE & PyData Darmstadt 2025 (program25@pycon.de)\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail = Mail(\n", + " subject=\"[PyConDE/PyData 2025] The Review Phase is Over!\",\n", + " body=mail_body,\n", + " recipients=reviewers_activated\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mail_client = MailClient()\n", + "responses, errors = mail_client.send(mail)\n", + "assert not errors" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + 
"nbformat": 4, + "nbformat_minor": 2 +}