Merge pull request #114 from mila-iqia/updating_torch
Updating torch and others
mirkobronzi authored Mar 12, 2024
2 parents 1099db2 + 6bc361f commit 4f4bcf4
Showing 7 changed files with 21 additions and 13 deletions.
7 changes: 4 additions & 3 deletions .gitignore
@@ -2,9 +2,10 @@
mlruns

examples/data/
examples/*/output/
examples/*/output*/
examples/*/orion_working_dir*/
examples/*/lightning_logs/

examples/*/orion_db.pkl
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -115,4 +116,4 @@ venv.bak/
*.swo

# vscode
.vscode
.vscode
2 changes: 2 additions & 0 deletions amlrt_project/train.py
@@ -212,6 +212,8 @@ def train_impl(model, datamodule, output, hyper_params, use_progress_bar, gpus):
# Log the best result and associated hyper parameters
best_dev_result = float(early_stopping.best_score.cpu().numpy())
logger.log_hyperparams(hyper_params, metrics={'best_dev_metric': best_dev_result})
with open(os.path.join(output, 'results.txt'), 'w') as stream_out:
stream_out.write(f'final best_dev_metric: {best_dev_result}\n')

return best_dev_result

2 changes: 1 addition & 1 deletion examples/local/config.yaml
@@ -2,7 +2,7 @@
batch_size: 32
optimizer: adam
loss: cross_entropy
max_epoch: 5
max_epoch: 1
exp_name: my_exp_1
num_workers: 0
# set to null to avoid setting a seed (can speed up GPU computation, but
2 changes: 1 addition & 1 deletion examples/local_orion/config.yaml
@@ -2,7 +2,7 @@
batch_size: 32
optimizer: adam
loss: cross_entropy
max_epoch: 5
max_epoch: 1
exp_name: my_exp_1
num_workers: 0
# set to null to avoid setting a seed (can speed up GPU computation, but
1 change: 0 additions & 1 deletion examples/local_orion/run.sh
@@ -1,4 +1,3 @@
export MLFLOW_TRACKING_URI='mlruns'
export ORION_DB_ADDRESS='orion_db.pkl'
export ORION_DB_TYPE='pickleddb'

7 changes: 4 additions & 3 deletions setup.py
@@ -17,15 +17,16 @@
'pytest==7.1.2',
'pytest-cov==3.0.0',
'pytorch_lightning==1.8.3',
'pytype==2023.1.17',
'pytype==2024.2.27',
'sphinx==7.2.6',
'sphinx-autoapi==3.0.0',
'sphinx-rtd-theme==1.3.0',
'sphinxcontrib-napoleon==0.7',
'sphinxcontrib-katex==0.9.9',
'tensorboard==2.16.2',
'tqdm==4.64.0',
'torch==1.12.0',
'torchvision==0.13.0',
'torch==2.2.1',
'torchvision==0.17.1',
],
entry_points={
'console_scripts': [
13 changes: 9 additions & 4 deletions tests/end2end_pytorch/run.sh
@@ -4,11 +4,13 @@ set -e
# go to the examples folder and run the example
cd $GITHUB_WORKSPACE/examples/local
sh run.sh
mv output outout_OLD
mv output output_OLD
# re-run the example to check reproducibility
sh run.sh
# check results are the same
DIFF_LINES=`grep "best_dev_metric" output*/stats.yaml | sed 's@^.*best_dev_metric: @@g' | uniq | wc -l`
echo "results are:"
cat output*/results.txt
DIFF_LINES=`cat output*/results.txt | uniq | wc -l`
if [ ${DIFF_LINES} -gt 1 ]; then
echo "ERROR: two identical runs produced different output results - review seed implementation"
exit 1
@@ -23,8 +25,11 @@ mv orion_working_dir orion_working_dir_OLD
# re-run the example to check reproducibility
rm -fr orion_db*
sh run.sh
DIFF_LINES=`grep "best_dev_metric" orion_working_dir*/*/stats.yaml | sed 's@^.*best_dev_metric: @@g' | sort | uniq | wc -l`
if [ ${DIFF_LINES} -gt 2 ]; then
# check results are the same
echo "results are:"
cat orion_working_dir*/*/results.txt
DIFF_LINES=`grep "best_dev_metric" orion_working_dir*/*/results.txt | sed 's@^.*best_dev_metric: @@g' | sort | uniq | wc -l`
if [ ${DIFF_LINES} -gt 2 ]; then # note we have two trials per experiment, this is why we can have 2 different results - but not more
echo "ERROR: two identical Orion runs produced different output results - review seed implementation"
exit 1
else
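For context, a minimal sketch of the reproducibility check this commit switches to: each training run writes its final best_dev_metric on a single line of results.txt, and the test counts how many distinct lines appear across the runs. The directories run_a and run_b and the metric value below are hypothetical, chosen only for illustration; the real test compares the output*/ directories produced by two invocations of run.sh.

mkdir -p run_a run_b
# Each run writes its final metric on a single line, as train.py now does.
echo 'final best_dev_metric: 0.123' > run_a/results.txt
echo 'final best_dev_metric: 0.123' > run_b/results.txt
# Concatenate the results files and count distinct lines; identical runs yield 1.
# (sort is not strictly needed for one-line files, but keeps uniq robust.)
DIFF_LINES=`cat run_*/results.txt | sort | uniq | wc -l`
if [ ${DIFF_LINES} -gt 1 ]; then
    echo "ERROR: the two runs produced different results"
    exit 1
else
    echo "OK: both runs produced the same best_dev_metric"
fi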
