ScriptRunner framework option in examples (#2827)
* use framework option in examples

* rename files
SYangster authored Aug 22, 2024
1 parent d7c92cf commit 9785297
Showing 13 changed files with 29 additions and 31 deletions.
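Across these files the change is the same: ScriptRunner's params_exchange_format=ExchangeFormat.X argument is replaced by framework=FrameworkType.X, which lets the runner derive the exchange format from the client's ML framework. A minimal sketch of the updated Job API pattern, based on the NumPy hello-world setup in this diff (the job name, round count, and workspace path are illustrative, not taken from this commit):

```
from nvflare import FedJob
from nvflare.app_common.workflows.fedavg import FedAvg
from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

n_clients = 2
job = FedJob(name="hello-fedavg-numpy")
job.to(FedAvg(num_clients=n_clients, num_rounds=2), "server")

for i in range(n_clients):
    # Before this commit: ScriptRunner(..., params_exchange_format=ExchangeFormat.NUMPY)
    executor = ScriptRunner(
        script="src/hello-numpy_fl.py",
        script_args="",
        framework=FrameworkType.NUMPY,  # RAW, NUMPY, or TENSORFLOW in these examples; PyTorch is the default
    )
    job.to(executor, f"site-{i+1}")

job.simulator_run("/tmp/nvflare/jobs/workdir")
```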
@@ -24,8 +24,7 @@
from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector
from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather
from nvflare.app_opt.sklearn.joblib_model_param_persistor import JoblibModelParamPersistor
-from nvflare.client.config import ExchangeFormat
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

preprocess = True # if False, assume data is already preprocessed and split

@@ -146,7 +145,7 @@ def split_higgs(input_data_path, input_header_path, output_dir, site_num, sample
executor = ScriptRunner(
script=train_script,
script_args=f"--data_root_dir {data_output_dir}",
-params_exchange_format=ExchangeFormat.RAW, # kmeans requires raw values only rather than PyTorch Tensors (the default)
+framework=FrameworkType.RAW, # kmeans requires raw values only rather than PyTorch Tensors (the default)
)
job.to(executor, f"site-{i+1}") # HIGGs data splitter assumes site names start from 1

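The k-means runner above passes FrameworkType.RAW because the scikit-learn client exchanges plain parameter dictionaries rather than framework tensors. A hedged sketch of the client-side loop such a runner launches, using the nvflare.client API (the "center" parameter key and the local-update step are assumptions for illustration, not taken from this diff):

```
import nvflare.client as flare
from nvflare.app_common.abstract.fl_model import FLModel

flare.init()
while flare.is_running():
    input_model = flare.receive()               # global params arrive as raw values, not tensors
    centers = input_model.params.get("center")  # assumed key holding the k-means cluster centers
    # ... update centers with a local MiniBatchKMeans pass over the site's data ...
    flare.send(FLModel(params={"center": centers}, meta={}))
```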
6 changes: 4 additions & 2 deletions examples/advanced/job_api/tf/tf_fl_script_runner_cifar10.py
@@ -23,7 +23,7 @@
from nvflare import FedJob
from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector
from nvflare.app_opt.tf.job_config.model import TFModel
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
@@ -159,7 +159,9 @@
# Add clients
for i, train_idx_path in enumerate(train_idx_paths):
curr_task_script_args = task_script_args + f" --train_idx_path {train_idx_path}"
-executor = ScriptRunner(script=train_script, script_args=curr_task_script_args)
+executor = ScriptRunner(
+script=train_script, script_args=curr_task_script_args, framework=FrameworkType.TENSORFLOW
+)
job.to(executor, f"site-{i+1}")

# Can export current job to folder.
@@ -24,8 +24,7 @@
from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector
from nvflare.app_common.workflows.scatter_and_gather import ScatterAndGather
from nvflare.app_opt.sklearn.joblib_model_param_persistor import JoblibModelParamPersistor
-from nvflare.client.config import ExchangeFormat
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

preprocess = True # if False, assume data is already preprocessed and split

@@ -146,7 +145,7 @@ def split_higgs(input_data_path, input_header_path, output_dir, site_num, sample
executor = ScriptRunner(
script=train_script,
script_args=f"--data_root_dir {data_output_dir}",
-params_exchange_format=ExchangeFormat.RAW, # kmeans requires raw values only rather than PyTorch Tensors (the default)
+framework=FrameworkType.RAW, # kmeans requires raw values only rather than PyTorch Tensors (the default)
)
job.to(executor, f"site-{i+1}") # HIGGs data splitter assumes site names start from 1

6 changes: 2 additions & 4 deletions examples/getting_started/tf/nvflare_tf_getting_started.ipynb
@@ -262,7 +262,7 @@
"outputs": [],
"source": [
"from nvflare import FedJob\n",
"from nvflare.job_config.script_runner import ScriptRunner\n",
"from nvflare.job_config.script_runner import FrameworkType, ScriptRunner\n",
"from nvflare.app_common.workflows.fedavg import FedAvg\n",
"\n",
"job = FedJob(name=\"cifar10_tf_fedavg\")"
@@ -363,12 +363,10 @@
"metadata": {},
"outputs": [],
"source": [
"from nvflare.client.config import ExchangeFormat\n",
"\n",
"for i in range(n_clients):\n",
" executor = ScriptRunner(\n",
" script=\"src/cifar10_tf_fl.py\", script_args=\"\", # f\"--batch_size 32 --data_path /tmp/data/site-{i}\"\n",
" params_exchange_format=ExchangeFormat.NUMPY,\n",
" framework=FrameworkType.TENSORFLOW,\n",
" )\n",
" job.to(executor, f\"site-{i+1}\")"
]
6 changes: 4 additions & 2 deletions examples/getting_started/tf/tf_fl_script_runner_cifar10.py
@@ -23,7 +23,7 @@
from nvflare import FedJob
from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector
from nvflare.app_opt.tf.job_config.model import TFModel
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

gpu_devices = tf.config.experimental.list_physical_devices("GPU")
for device in gpu_devices:
@@ -159,7 +159,9 @@
# Add clients
for i, train_idx_path in enumerate(train_idx_paths):
curr_task_script_args = task_script_args + f" --train_idx_path {train_idx_path}"
-executor = ScriptRunner(script=train_script, script_args=curr_task_script_args)
+executor = ScriptRunner(
+script=train_script, script_args=curr_task_script_args, framework=FrameworkType.TENSORFLOW
+)
job.to(executor, f"site-{i+1}")

# Can export current job to folder.
2 changes: 1 addition & 1 deletion examples/hello-world/hello-cyclic/README.md
@@ -29,7 +29,7 @@ bash ./prepare_data.sh
Run the script using the job API to create the job and run it with the simulator:

```
-python3 cyclic_script_runner_hello-cyclic.py
+python3 cyclic_script_runner.py
```

### 3. Access the logs and results
@@ -16,9 +16,8 @@

from nvflare import FedJob
from nvflare.app_common.workflows.cyclic import Cyclic
-from nvflare.app_opt.pt.job_config.model import PTModel
-from nvflare.client.config import ExchangeFormat
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.app_opt.tf.job_config.model import TFModel
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

if __name__ == "__main__":
n_clients = 2
@@ -35,14 +34,14 @@
job.to(controller, "server")

# Define the initial global model and send to server
-job.to(PTModel(Net()), "server")
+job.to(TFModel(Net()), "server")

# Add clients
for i in range(n_clients):
executor = ScriptRunner(
script=train_script,
script_args="", # f"--batch_size 32 --data_path /tmp/data/site-{i}"
-params_exchange_format=ExchangeFormat.NUMPY,
+framework=FrameworkType.TENSORFLOW,
)
job.to(executor, f"site-{i+1}")

@@ -15,8 +15,7 @@
from nvflare import FedJob
from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector
from nvflare.app_common.workflows.fedavg import FedAvg
-from nvflare.client.config import ExchangeFormat
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

if __name__ == "__main__":
n_clients = 2
@@ -36,7 +35,7 @@

# Add clients
for i in range(n_clients):
-executor = ScriptRunner(script=train_script, script_args="", params_exchange_format=ExchangeFormat.NUMPY)
+executor = ScriptRunner(script=train_script, script_args="", framework=FrameworkType.NUMPY)
job.to(executor, f"site-{i+1}")

# job.export_job("/tmp/nvflare/jobs/job_config")
@@ -194,7 +194,7 @@
"source": [
"from nvflare import FedJob\n",
"from nvflare.app_common.workflows.fedavg import FedAvg\n",
"from nvflare.job_config.script_runner import ScriptRunner\n",
"from nvflare.job_config.script_runner import FrameworkType, ScriptRunner\n",
"\n",
"\n",
"job = FedJob(name=\"hello-fedavg-numpy\")"
@@ -266,13 +266,11 @@
"metadata": {},
"outputs": [],
"source": [
"from nvflare.client.config import ExchangeFormat\n",
"\n",
"train_script = \"src/hello-numpy_fl.py\"\n",
"\n",
"for i in range(n_clients):\n",
" executor = ScriptRunner(\n",
" script=train_script, script_args=\"\", params_exchange_format=ExchangeFormat.NUMPY\n",
" script=train_script, script_args=\"\", framework=FrameworkType.NUMPY\n",
" )\n",
" job.to(executor, f\"site-{i}\")"
]
2 changes: 1 addition & 1 deletion examples/hello-world/hello-pt/README.md
@@ -31,7 +31,7 @@ pip3 install -r requirements.txt
Run the script using the job API to create the job and run it with the simulator:

```
-python3 fedavg_script_runner_hello-pt.py
+python3 fedavg_script_runner_pt.py
```

### 3. Access the logs and results
2 changes: 1 addition & 1 deletion examples/hello-world/hello-tf/README.md
@@ -27,7 +27,7 @@ pip3 install tensorflow
Run the script using the job API to create the job and run it with the simulator:

```
-python3 fedavg_script_runner_hello-tf.py
+python3 fedavg_script_runner_tf.py
```

### 3. Access the logs and results
@@ -15,7 +15,7 @@
from src.tf_net import TFNet

from nvflare.app_opt.tf.job_config.fed_avg import FedAvgJob
-from nvflare.job_config.script_runner import ScriptRunner
+from nvflare.job_config.script_runner import FrameworkType, ScriptRunner

if __name__ == "__main__":
n_clients = 2
@@ -27,7 +27,9 @@
# Add clients
for i in range(n_clients):
executor = ScriptRunner(
-script=train_script, script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}"
+script=train_script,
+script_args="", # f"--batch_size 32 --data_path /tmp/data/site-{i}"
+framework=FrameworkType.TENSORFLOW,
)
job.to(executor, f"site-{i+1}")
