Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
- .gitignore +81 -0
- 52715.error +309 -0
- 52715.log +114 -0
- 52729.error +309 -0
- 52729.log +114 -0
- Meta-causal/code-stage1-pipeline/56451.error +297 -0
- Meta-causal/code-stage1-pipeline/56451.log +0 -0
- Meta-causal/code-stage1-pipeline/56452.error +302 -0
- Meta-causal/code-stage1-pipeline/56452.log +0 -0
- Meta-causal/code-stage1-pipeline/56454.error +3 -0
- Meta-causal/code-stage1-pipeline/56454.log +0 -0
- Meta-causal/code-stage1-pipeline/56455.error +4 -0
- Meta-causal/code-stage1-pipeline/56455.log +0 -0
- Meta-causal/code-stage1-pipeline/56456.error +3 -0
- Meta-causal/code-stage1-pipeline/56456.log +0 -0
- Meta-causal/code-stage1-pipeline/56457.error +4 -0
- Meta-causal/code-stage1-pipeline/56457.log +0 -0
- Meta-causal/code-stage1-pipeline/56458.error +3 -0
- Meta-causal/code-stage1-pipeline/56458.log +0 -0
- Meta-causal/code-stage1-pipeline/56526.error +31 -0
- Meta-causal/code-stage1-pipeline/56526.log +4 -0
- Meta-causal/code-stage1-pipeline/56527.error +31 -0
- Meta-causal/code-stage1-pipeline/56527.log +3 -0
- Meta-causal/code-stage1-pipeline/56528.error +3 -0
- Meta-causal/code-stage1-pipeline/56528.log +0 -0
- Meta-causal/code-stage1-pipeline/56529.error +1 -0
- Meta-causal/code-stage1-pipeline/56529.log +0 -0
- Meta-causal/code-stage1-pipeline/56540.error +4 -0
- Meta-causal/code-stage1-pipeline/56540.log +151 -0
- Meta-causal/code-stage1-pipeline/56541.error +0 -0
- Meta-causal/code-stage1-pipeline/56541.log +432 -0
- Meta-causal/code-stage1-pipeline/AllEpochs_test_digit_v13.py +101 -0
- Meta-causal/code-stage1-pipeline/AllEpochs_test_pacs_v13.py +103 -0
- Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py +861 -0
- Meta-causal/code-stage1-pipeline/env.yaml +119 -0
- Meta-causal/code-stage1-pipeline/main_my_joint_v13_auto.py +279 -0
- Meta-causal/code-stage1-pipeline/main_test_digit_v13.py +85 -0
- Meta-causal/code-stage1-pipeline/main_test_pacs_v13.py +89 -0
- Meta-causal/code-stage1-pipeline/network/adaptor_v2.py +63 -0
- Meta-causal/code-stage1-pipeline/network/mnist_net_my.py +104 -0
- Meta-causal/code-stage1-pipeline/network/resnet.py +101 -0
- Meta-causal/code-stage1-pipeline/network/wideresnet.py +86 -0
- Meta-causal/code-stage1-pipeline/run_PACS/run_my_joint_v13_test.sh +35 -0
- Meta-causal/code-stage1-pipeline/run_digits/run_my_joint_test.sh +34 -0
- Meta-causal/code-stage1-pipeline/saved-PACS/art_painting/CA_multiple_16fa_v2_ep70_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/events.out.tfevents.1719926752.hala +3 -0
- Meta-causal/code-stage1-pipeline/saved-PACS/art_painting/CA_multiple_16fa_v2_ep70_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/log.log +1 -0
- Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925086.hala +3 -0
- Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925314.hala +3 -0
- Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925652.hala +3 -0
- Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/log.log +1 -0
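The commit message above, "Upload folder using huggingface_hub", is the default message produced by the `upload_folder` API. For context, a minimal sketch of how such a commit is typically created with the `huggingface_hub` client follows; the repository id, local folder path, and ignore patterns are placeholders, not values taken from this commit.

# Minimal sketch, assuming a logged-in client (HF_TOKEN or `huggingface-cli login`).
# Repo id and paths below are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="Meta-causal",                 # local directory to push
    repo_id="your-username/your-repo",         # placeholder target repository
    repo_type="model",                         # or "dataset"
    commit_message="Upload folder using huggingface_hub",
    ignore_patterns=["*.tmp"],                 # optional: skip files you don't want committed
)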
.gitignore
ADDED
@@ -0,0 +1,81 @@
+# no IntelliJ files
+.idea
+
+# don't upload macOS folder info
+*.DS_Store
+
+# don't upload node_modules from npm test
+node_modules/*
+flow-typed/*
+
+# potential files generated by golang
+bin/
+
+# don't upload webpack bundle file
+app/dist/
+
+# potential integration testing data directory
+# test_data/
+/data
+
+#python
+*.pyc
+__pycache__/
+
+# pytype
+.pytype
+
+# vscode sftp settings
+.vscode/sftp.json
+
+# vscode launch settings
+.vscode/launch.json
+
+# redis
+*.rdb
+
+# mypy
+.mypy_cache
+
+# jest coverage cache
+coverage/
+
+# downloaded repos and models
+scalabel/bot/experimental/*
+
+
+# python virtual environment
+env/
+
+# vscode workspace configuration
+*.code-workspace
+
+# sphinx build folder
+_build/
+
+# media files are not in this repo
+doc/media
+
+# ignore rope db cache
+.vscode/.ropeproject
+
+# python build
+build/
+dist/
+
+# coverage
+.coverage*
+
+# package default workspace
+/output
+
+*.tmp
+*.zip
+
+# local test logs and scripts
+log/
+/*.sh
+wandb/
+
+# No lightning logs
+lightning_logs/
52715.error
ADDED
@@ -0,0 +1,309 @@
+Solving dependencies
+Installing conda packages
+Empty environment created at prefix: /scratch/yuqian_fu/micromamba/envs/auto-zcubaqpyrbpe
+error libmamba Could not lock non-existing path '/scratch/yuqian_fu/micromamba/pkgs'
+Transaction
+
+Prefix: /scratch/yuqian_fu/micromamba/envs/auto-zcubaqpyrbpe
+
+
+
+No specs added or removed.
+
+Package Version Build Channel Size
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+Install:
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+
++ _libgcc_mutex 0.1 conda_forge conda-forge
++ _openmp_mutex 4.5 2_kmp_llvm conda-forge
++ blas 2.116 mkl conda-forge
++ blas-devel 3.9.0 16_linux64_mkl conda-forge
++ brotli-python 1.1.0 py311hb755f60_1 conda-forge
++ bzip2 1.0.8 hd590300_5 conda-forge
++ ca-certificates 2024.6.2 hbcca054_0 conda-forge
++ certifi 2024.6.2 pyhd8ed1ab_0 conda-forge
++ cffi 1.16.0 py311hb3a22ac_0 conda-forge
++ charset-normalizer 3.3.2 pyhd8ed1ab_0 conda-forge
++ cuda-cudart 12.1.105 0 nvidia
++ cuda-cupti 12.1.105 0 /work/conda/cache/nvidia
++ cuda-libraries 12.1.0 0 nvidia
++ cuda-nvrtc 12.1.105 0 /work/conda/cache/nvidia
++ cuda-nvtx 12.1.105 0 nvidia
++ cuda-opencl 12.5.39 0 nvidia
++ cuda-runtime 12.1.0 0 nvidia
++ cuda-version 12.5 3 nvidia
++ ffmpeg 4.3 hf484d3e_0 /work/conda/cache/pytorch
++ filelock 3.15.4 pyhd8ed1ab_0 conda-forge
++ freetype 2.12.1 h267a509_2 conda-forge
++ gmp 6.3.0 hac33072_2 conda-forge
++ gmpy2 2.1.5 py311hc4f1f91_1 conda-forge
++ gnutls 3.6.13 h85f3911_1 /work/conda/cache/conda-forge
++ h2 4.1.0 pyhd8ed1ab_0 conda-forge
++ hpack 4.0.0 pyh9f0ad1d_0 conda-forge
++ hyperframe 6.0.1 pyhd8ed1ab_0 conda-forge
++ icu 73.2 h59595ed_0 /work/conda/cache/conda-forge
++ idna 3.7 pyhd8ed1ab_0 conda-forge
++ jinja2 3.1.4 pyhd8ed1ab_0 conda-forge
++ jpeg 9e h166bdaf_2 conda-forge
++ lame 3.100 h166bdaf_1003 conda-forge
++ lcms2 2.15 hfd0df8a_0 conda-forge
++ ld_impl_linux-64 2.40 hf3520f5_7 conda-forge
++ lerc 4.0.0 h27087fc_0 conda-forge
++ libblas 3.9.0 16_linux64_mkl conda-forge
++ libcblas 3.9.0 16_linux64_mkl conda-forge
++ libcublas 12.1.0.26 0 /work/conda/cache/nvidia
++ libcufft 11.0.2.4 0 /work/conda/cache/nvidia
++ libcufile 1.10.0.4 0 nvidia
++ libcurand 10.3.6.39 0 nvidia
++ libcusolver 11.4.4.55 0 /work/conda/cache/nvidia
++ libcusparse 12.0.2.55 0 /work/conda/cache/nvidia
++ libdeflate 1.17 h0b41bf4_0 conda-forge
++ libexpat 2.6.2 h59595ed_0 conda-forge
++ libffi 3.4.2 h7f98852_5 conda-forge
++ libgcc-ng 13.2.0 h77fa898_13 conda-forge
++ libgfortran-ng 13.2.0 h69a702a_13 conda-forge
++ libgfortran5 13.2.0 h3d2ce59_13 conda-forge
++ libhwloc 2.10.0 default_h5622ce7_1001 conda-forge
++ libiconv 1.17 hd590300_2 conda-forge
++ libjpeg-turbo 2.0.0 h9bf148f_0 pytorch
++ liblapack 3.9.0 16_linux64_mkl conda-forge
++ liblapacke 3.9.0 16_linux64_mkl conda-forge
++ libnpp 12.0.2.50 0 /work/conda/cache/nvidia
++ libnsl 2.0.1 hd590300_0 conda-forge
++ libnvjitlink 12.1.105 0 /work/conda/cache/nvidia
++ libnvjpeg 12.1.1.14 0 /work/conda/cache/nvidia
++ libpng 1.6.43 h2797004_0 conda-forge
++ libsqlite 3.46.0 hde9e2c9_0 conda-forge
++ libstdcxx-ng 13.2.0 hc0a3c3a_13 conda-forge
++ libtiff 4.5.0 h6adf6a1_2 conda-forge
++ libuuid 2.38.1 h0b41bf4_0 conda-forge
++ libwebp-base 1.4.0 hd590300_0 conda-forge
++ libxcb 1.13 h7f98852_1004 conda-forge
++ libxcrypt 4.4.36 hd590300_1 conda-forge
++ libxml2 2.12.7 hc051c1a_1 conda-forge
++ libzlib 1.2.13 h4ab18f5_6 conda-forge
++ llvm-openmp 15.0.7 h0cdce71_0 /work/conda/cache/conda-forge
++ markupsafe 2.1.5 py311h459d7ec_0 conda-forge
++ mkl 2022.1.0 h84fe81f_915 /work/conda/cache/conda-forge
++ mkl-devel 2022.1.0 ha770c72_916 conda-forge
++ mkl-include 2022.1.0 h84fe81f_915 conda-forge
++ mpc 1.3.1 hfe3b2da_0 conda-forge
++ mpfr 4.2.1 h9458935_1 conda-forge
++ mpmath 1.3.0 pyhd8ed1ab_0 conda-forge
++ ncurses 6.5 h59595ed_0 conda-forge
++ nettle 3.6 he412f7d_0 /work/conda/cache/conda-forge
++ networkx 3.3 pyhd8ed1ab_1 /work/conda/cache/conda-forge
++ numpy 2.0.0 py311h1461c94_0 conda-forge
++ openh264 2.1.1 h780b84a_0 /work/conda/cache/conda-forge
++ openjpeg 2.5.0 hfec8fc6_2 conda-forge
++ openssl 3.3.1 h4ab18f5_1 conda-forge
++ pandas 2.2.2 py311h14de704_1 conda-forge
++ pillow 9.4.0 py311h50def17_1 conda-forge
++ pip 24.0 pyhd8ed1ab_0 /work/conda/cache/conda-forge
++ pthread-stubs 0.4 h36c2ea0_1001 conda-forge
++ pycparser 2.22 pyhd8ed1ab_0 conda-forge
++ pysocks 1.7.1 pyha2e5f31_6 conda-forge
++ python 3.11.9 hb806964_0_cpython /work/conda/cache/conda-forge
++ python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge
++ python-tzdata 2024.1 pyhd8ed1ab_0 conda-forge
++ python_abi 3.11 4_cp311 conda-forge
++ pytorch 2.3.1 py3.11_cuda12.1_cudnn8.9.2_0 pytorch
++ pytorch-cuda 12.1 ha16c6d3_5 pytorch
++ pytorch-mutex 1.0 cuda pytorch
++ pytz 2024.1 pyhd8ed1ab_0 conda-forge
++ pyyaml 6.0.1 py311h459d7ec_1 conda-forge
++ readline 8.2 h8228510_1 conda-forge
++ requests 2.32.3 pyhd8ed1ab_0 conda-forge
++ setuptools 70.1.1 pyhd8ed1ab_0 conda-forge
++ six 1.16.0 pyh6c4a22f_0 conda-forge
++ sympy 1.12.1 pypyh2585a3b_103 conda-forge
++ tbb 2021.12.0 h297d8ca_1 conda-forge
++ tk 8.6.13 noxft_h4845f30_101 /work/conda/cache/conda-forge
++ torchtriton 2.3.1 py311 pytorch
++ torchvision 0.18.1 py311_cu121 pytorch
++ typing_extensions 4.12.2 pyha770c72_0 conda-forge
++ tzdata 2024a h0c530f3_0 conda-forge
++ urllib3 2.2.2 pyhd8ed1ab_1 conda-forge
++ wheel 0.43.0 pyhd8ed1ab_1 conda-forge
++ xorg-libxau 1.0.11 hd590300_0 conda-forge
++ xorg-libxdmcp 1.1.3 h516909a_0 conda-forge
++ xz 5.2.6 h166bdaf_0 conda-forge
++ yaml 0.2.5 h7f98852_2 conda-forge
++ zlib 1.2.13 h4ab18f5_6 conda-forge
++ zstandard 0.22.0 py311hb6f056b_1 conda-forge
++ zstd 1.5.6 ha6fb4c9_0 conda-forge
+
+Summary:
+
+Install: 118 packages
+
+Total download: 0 B
+
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+
+
+
+Transaction starting
+Linking libcublas-12.1.0.26-0
+Linking libcufft-11.0.2.4-0
+Linking libcusolver-11.4.4.55-0
+Linking libcusparse-12.0.2.55-0
+Linking libnpp-12.0.2.50-0
+Linking libnvjitlink-12.1.105-0
+Linking cuda-cudart-12.1.105-0
+Linking cuda-nvrtc-12.1.105-0
+Linking libnvjpeg-12.1.1.14-0
+Linking cuda-cupti-12.1.105-0
+Linking cuda-nvtx-12.1.105-0
+Linking pytorch-mutex-1.0-cuda
+Linking _libgcc_mutex-0.1-conda_forge
+Linking mkl-include-2022.1.0-h84fe81f_915
+Linking python_abi-3.11-4_cp311
+Linking ld_impl_linux-64-2.40-hf3520f5_7
+Linking ca-certificates-2024.6.2-hbcca054_0
+Linking libgcc-ng-13.2.0-h77fa898_13
+Linking libzlib-1.2.13-h4ab18f5_6
+Linking llvm-openmp-15.0.7-h0cdce71_0
+Linking _openmp_mutex-4.5-2_kmp_llvm
+Linking xorg-libxdmcp-1.1.3-h516909a_0
+Linking pthread-stubs-0.4-h36c2ea0_1001
+Linking xorg-libxau-1.0.11-hd590300_0
+Linking libwebp-base-1.4.0-hd590300_0
+Linking libdeflate-1.17-h0b41bf4_0
+Linking jpeg-9e-h166bdaf_2
+Linking libffi-3.4.2-h7f98852_5
+Linking tk-8.6.13-noxft_h4845f30_101
+Linking openssl-3.3.1-h4ab18f5_1
+Linking libxcrypt-4.4.36-hd590300_1
+Linking libsqlite-3.46.0-hde9e2c9_0
+Linking yaml-0.2.5-h7f98852_2
+Linking ncurses-6.5-h59595ed_0
+Linking libgfortran5-13.2.0-h3d2ce59_13
+Linking lame-3.100-h166bdaf_1003
+Linking nettle-3.6-he412f7d_0
+Linking zlib-1.2.13-h4ab18f5_6
+Linking libstdcxx-ng-13.2.0-hc0a3c3a_13
+Linking libiconv-1.17-hd590300_2
+Linking bzip2-1.0.8-hd590300_5
+Linking libpng-1.6.43-h2797004_0
+Linking xz-5.2.6-h166bdaf_0
+Linking libuuid-2.38.1-h0b41bf4_0
+Linking libnsl-2.0.1-hd590300_0
+Linking libexpat-2.6.2-h59595ed_0
+Linking libxcb-1.13-h7f98852_1004
+Linking readline-8.2-h8228510_1
+Linking libgfortran-ng-13.2.0-h69a702a_13
+Linking icu-73.2-h59595ed_0
+Linking zstd-1.5.6-ha6fb4c9_0
+Linking lerc-4.0.0-h27087fc_0
+Linking openh264-2.1.1-h780b84a_0
+Linking gnutls-3.6.13-h85f3911_1
+Linking gmp-6.3.0-hac33072_2
+Linking freetype-2.12.1-h267a509_2
+Linking libxml2-2.12.7-hc051c1a_1
+Linking libtiff-4.5.0-h6adf6a1_2
+Linking mpfr-4.2.1-h9458935_1
+Linking libhwloc-2.10.0-default_h5622ce7_1001
+Linking openjpeg-2.5.0-hfec8fc6_2
+Linking lcms2-2.15-hfd0df8a_0
+Linking mpc-1.3.1-hfe3b2da_0
+Linking tbb-2021.12.0-h297d8ca_1
+Linking mkl-2022.1.0-h84fe81f_915
+Linking mkl-devel-2022.1.0-ha770c72_916
+Linking libblas-3.9.0-16_linux64_mkl
+Linking liblapack-3.9.0-16_linux64_mkl
+Linking libcblas-3.9.0-16_linux64_mkl
+Linking liblapacke-3.9.0-16_linux64_mkl
+Linking blas-devel-3.9.0-16_linux64_mkl
+Linking blas-2.116-mkl
+Linking cuda-version-12.5-3
+Linking tzdata-2024a-h0c530f3_0
+Linking libjpeg-turbo-2.0.0-h9bf148f_0
+warning libmamba [libjpeg-turbo-2.0.0-h9bf148f_0] The following files were already present in the environment:
+- bin/cjpeg
+- bin/djpeg
+- bin/jpegtran
+- bin/rdjpgcom
+- bin/wrjpgcom
+- include/jconfig.h
+- include/jerror.h
+- include/jmorecfg.h
+- include/jpeglib.h
+- lib/libjpeg.a
+- lib/libjpeg.so
+- lib/pkgconfig/libjpeg.pc
+- share/man/man1/cjpeg.1
+- share/man/man1/djpeg.1
+- share/man/man1/jpegtran.1
+- share/man/man1/rdjpgcom.1
+- share/man/man1/wrjpgcom.1
+Linking ffmpeg-4.3-hf484d3e_0
+Linking libcurand-10.3.6.39-0
+Linking libcufile-1.10.0.4-0
+Linking cuda-opencl-12.5.39-0
+Linking cuda-libraries-12.1.0-0
+Linking cuda-runtime-12.1.0-0
+Linking python-3.11.9-hb806964_0_cpython
+Linking pytorch-cuda-12.1-ha16c6d3_5
+Linking wheel-0.43.0-pyhd8ed1ab_1
+Linking setuptools-70.1.1-pyhd8ed1ab_0
+Linking pip-24.0-pyhd8ed1ab_0
+Linking pycparser-2.22-pyhd8ed1ab_0
+Linking six-1.16.0-pyh6c4a22f_0
+Linking hyperframe-6.0.1-pyhd8ed1ab_0
+Linking pytz-2024.1-pyhd8ed1ab_0
+Linking python-tzdata-2024.1-pyhd8ed1ab_0
+Linking charset-normalizer-3.3.2-pyhd8ed1ab_0
+Linking hpack-4.0.0-pyh9f0ad1d_0
+Linking pysocks-1.7.1-pyha2e5f31_6
+Linking idna-3.7-pyhd8ed1ab_0
+Linking certifi-2024.6.2-pyhd8ed1ab_0
+Linking mpmath-1.3.0-pyhd8ed1ab_0
+Linking typing_extensions-4.12.2-pyha770c72_0
+Linking networkx-3.3-pyhd8ed1ab_1
+Linking filelock-3.15.4-pyhd8ed1ab_0
+Linking python-dateutil-2.9.0-pyhd8ed1ab_0
+Linking h2-4.1.0-pyhd8ed1ab_0
+Linking brotli-python-1.1.0-py311hb755f60_1
+Linking markupsafe-2.1.5-py311h459d7ec_0
+Linking gmpy2-2.1.5-py311hc4f1f91_1
+Linking pyyaml-6.0.1-py311h459d7ec_1
+Linking pillow-9.4.0-py311h50def17_1
+Linking numpy-2.0.0-py311h1461c94_0
+Linking cffi-1.16.0-py311hb3a22ac_0
+Linking pandas-2.2.2-py311h14de704_1
+Linking zstandard-0.22.0-py311hb6f056b_1
+Linking jinja2-3.1.4-pyhd8ed1ab_0
+Linking sympy-1.12.1-pypyh2585a3b_103
+Linking urllib3-2.2.2-pyhd8ed1ab_1
+Linking requests-2.32.3-pyhd8ed1ab_0
+Linking pytorch-2.3.1-py3.11_cuda12.1_cudnn8.9.2_0
+Linking torchtriton-2.3.1-py311
+Linking torchvision-0.18.1-py311_cu121
+
+Transaction finished
+
+To activate this environment, use:
+
+mamba activate auto-zcubaqpyrbpe
+
+Or to execute a single command in this environment, use:
+
+mamba run -n auto-zcubaqpyrbpe mycommand
+
+Installing pip packages
+WARNING: The candidate selected for download or install is a yanked version: 'opencv-python' candidate (version 4.5.5.62 at https://files.pythonhosted.org/packages/9d/98/36bfcbff30da27dd6922ed73ca7802c37d87f77daf4c569da3dcb87b4296/opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from https://pypi.org/simple/opencv-python/) (requires-python:>=3.6))
+Reason for being yanked: deprecated, use 4.5.5.64
+Traceback (most recent call last):
+File "/home/yuqian_fu/Projects/CausalStyleAdv/metatrain_CausalStyle_RN.py", line 124, in <module>
+base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/home/yuqian_fu/Projects/CausalStyleAdv/data/datamgr.py", line 137, in get_data_loader
+dataset = SetDataset( data_file , self.batch_size, transform )
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/home/yuqian_fu/Projects/CausalStyleAdv/data/dataset.py", line 62, in __init__
+with open(data_file, 'r') as f:
+^^^^^^^^^^^^^^^^^^^^
+FileNotFoundError: [Errno 2] No such file or directory: '/scratch/yuqian_fu/Data/CDFSL/miniImagenet/base.json'
+srun: error: gcp-eu-2: task 0: Exited with exit code 1
52715.log
ADDED
@@ -0,0 +1,114 @@
+Collecting h5py>=2.9.0
+Downloading h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (2.5 kB)
+Collecting ml-collections
+Downloading ml_collections-0.1.1.tar.gz (77 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 77.9/77.9 kB 8.8 MB/s eta 0:00:00
+Preparing metadata (setup.py): started
+Preparing metadata (setup.py): finished with status 'done'
+Collecting opencv-python==4.5.5.62
+Downloading opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (18 kB)
+Collecting scipy>=1.3.2
+Downloading scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (60 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 60.8/60.8 kB 8.1 MB/s eta 0:00:00
+Collecting tensorboard
+Downloading tensorboard-2.17.0-py3-none-any.whl.metadata (1.6 kB)
+Collecting tensorboardX>=1.4
+Downloading tensorboardX-2.6.2.2-py2.py3-none-any.whl.metadata (5.8 kB)
+Collecting timm
+Downloading timm-1.0.7-py3-none-any.whl.metadata (47 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 47.5/47.5 kB 16.0 MB/s eta 0:00:00
+Requirement already satisfied: numpy>=1.21.2 in ./lib/python3.11/site-packages (from opencv-python==4.5.5.62) (2.0.0)
+Collecting absl-py (from ml-collections)
+Downloading absl_py-2.1.0-py3-none-any.whl.metadata (2.3 kB)
+Requirement already satisfied: PyYAML in ./lib/python3.11/site-packages (from ml-collections) (6.0.1)
+Requirement already satisfied: six in ./lib/python3.11/site-packages (from ml-collections) (1.16.0)
+Collecting contextlib2 (from ml-collections)
+Downloading contextlib2-21.6.0-py2.py3-none-any.whl.metadata (4.1 kB)
+Collecting grpcio>=1.48.2 (from tensorboard)
+Downloading grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.3 kB)
+Collecting markdown>=2.6.8 (from tensorboard)
+Downloading Markdown-3.6-py3-none-any.whl.metadata (7.0 kB)
+Collecting protobuf!=4.24.0,<5.0.0,>=3.19.6 (from tensorboard)
+Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl.metadata (541 bytes)
+Requirement already satisfied: setuptools>=41.0.0 in ./lib/python3.11/site-packages (from tensorboard) (70.1.1)
+Collecting tensorboard-data-server<0.8.0,>=0.7.0 (from tensorboard)
+Downloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl.metadata (1.1 kB)
+Collecting werkzeug>=1.0.1 (from tensorboard)
+Downloading werkzeug-3.0.3-py3-none-any.whl.metadata (3.7 kB)
+Collecting packaging (from tensorboardX>=1.4)
+Downloading packaging-24.1-py3-none-any.whl.metadata (3.2 kB)
+Requirement already satisfied: torch in ./lib/python3.11/site-packages (from timm) (2.3.1)
+Requirement already satisfied: torchvision in ./lib/python3.11/site-packages (from timm) (0.18.1)
+Collecting huggingface_hub (from timm)
+Downloading huggingface_hub-0.23.4-py3-none-any.whl.metadata (12 kB)
+Collecting safetensors (from timm)
+Downloading safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.8 kB)
+Requirement already satisfied: MarkupSafe>=2.1.1 in ./lib/python3.11/site-packages (from werkzeug>=1.0.1->tensorboard) (2.1.5)
+Requirement already satisfied: filelock in ./lib/python3.11/site-packages (from huggingface_hub->timm) (3.15.4)
+Collecting fsspec>=2023.5.0 (from huggingface_hub->timm)
+Downloading fsspec-2024.6.0-py3-none-any.whl.metadata (11 kB)
+Requirement already satisfied: requests in ./lib/python3.11/site-packages (from huggingface_hub->timm) (2.32.3)
+Collecting tqdm>=4.42.1 (from huggingface_hub->timm)
+Downloading tqdm-4.66.4-py3-none-any.whl.metadata (57 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 57.6/57.6 kB 22.1 MB/s eta 0:00:00
+Requirement already satisfied: typing-extensions>=3.7.4.3 in ./lib/python3.11/site-packages (from huggingface_hub->timm) (4.12.2)
+Requirement already satisfied: sympy in ./lib/python3.11/site-packages (from torch->timm) (1.12.1)
+Requirement already satisfied: networkx in ./lib/python3.11/site-packages (from torch->timm) (3.3)
+Requirement already satisfied: jinja2 in ./lib/python3.11/site-packages (from torch->timm) (3.1.4)
+Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in ./lib/python3.11/site-packages (from torchvision->timm) (9.4.0)
+Requirement already satisfied: charset-normalizer<4,>=2 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (3.3.2)
+Requirement already satisfied: idna<4,>=2.5 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (3.7)
+Requirement already satisfied: urllib3<3,>=1.21.1 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (2.2.2)
+Requirement already satisfied: certifi>=2017.4.17 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (2024.6.2)
+Requirement already satisfied: mpmath<1.4.0,>=1.1.0 in ./lib/python3.11/site-packages (from sympy->torch->timm) (1.3.0)
+Downloading opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (60.4 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 60.4/60.4 MB 91.0 MB/s eta 0:00:00
+Downloading h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.4/5.4 MB 183.1 MB/s eta 0:00:00
+Downloading scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (41.1 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.1/41.1 MB 107.5 MB/s eta 0:00:00
+Downloading tensorboard-2.17.0-py3-none-any.whl (5.5 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.5/5.5 MB 30.9 MB/s eta 0:00:00
+Downloading tensorboardX-2.6.2.2-py2.py3-none-any.whl (101 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 101.7/101.7 kB 36.0 MB/s eta 0:00:00
+Downloading timm-1.0.7-py3-none-any.whl (2.3 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.3/2.3 MB 137.1 MB/s eta 0:00:00
+Downloading absl_py-2.1.0-py3-none-any.whl (133 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 133.7/133.7 kB 51.3 MB/s eta 0:00:00
+Downloading grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.6 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.6/5.6 MB 88.2 MB/s eta 0:00:00
+Downloading Markdown-3.6-py3-none-any.whl (105 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 105.4/105.4 kB 38.1 MB/s eta 0:00:00
+Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl (294 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 294.6/294.6 kB 50.7 MB/s eta 0:00:00
+Downloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl (6.6 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.6/6.6 MB 173.4 MB/s eta 0:00:00
+Downloading werkzeug-3.0.3-py3-none-any.whl (227 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 227.3/227.3 kB 73.1 MB/s eta 0:00:00
+Downloading contextlib2-21.6.0-py2.py3-none-any.whl (13 kB)
+Downloading huggingface_hub-0.23.4-py3-none-any.whl (402 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 402.6/402.6 kB 61.1 MB/s eta 0:00:00
+Downloading packaging-24.1-py3-none-any.whl (53 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 54.0/54.0 kB 21.8 MB/s eta 0:00:00
+Downloading safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 122.2 MB/s eta 0:00:00
+Downloading fsspec-2024.6.0-py3-none-any.whl (176 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 176.9/176.9 kB 62.7 MB/s eta 0:00:00
+Downloading tqdm-4.66.4-py3-none-any.whl (78 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.3/78.3 kB 27.8 MB/s eta 0:00:00
+Building wheels for collected packages: ml-collections
+Building wheel for ml-collections (setup.py): started
+Building wheel for ml-collections (setup.py): finished with status 'done'
+Created wheel for ml-collections: filename=ml_collections-0.1.1-py3-none-any.whl size=94508 sha256=d89d1d746d60ee7c5ccd906afd932a6369bd5c90b009d4e595ac300929458aa5
+Stored in directory: /scratch/yuqian_fu/.cache/pip/wheels/28/82/ef/a6971b09a96519d55ce6efef66f0cbcdef2ae9cc1e6b41daf7
+Successfully built ml-collections
+Installing collected packages: werkzeug, tqdm, tensorboard-data-server, scipy, safetensors, protobuf, packaging, opencv-python, markdown, h5py, grpcio, fsspec, contextlib2, absl-py, tensorboardX, tensorboard, ml-collections, huggingface_hub, timm
+Successfully installed absl-py-2.1.0 contextlib2-21.6.0 fsspec-2024.6.0 grpcio-1.64.1 h5py-3.11.0 huggingface_hub-0.23.4 markdown-3.6 ml-collections-0.1.1 opencv-python-4.5.5.62 packaging-24.1 protobuf-4.25.3 safetensors-0.4.3 scipy-1.14.0 tensorboard-2.17.0 tensorboard-data-server-0.7.2 tensorboardX-2.6.2.2 timm-1.0.7 tqdm-4.66.4 werkzeug-3.0.3
+backbone: maml: False
+hi this is causal style
+set seed = 0
+
+--- prepare dataloader ---
+train with single seen domain miniImagenet
+
+--- build model ---
52729.error
ADDED
@@ -0,0 +1,309 @@
+Solving dependencies
+Installing conda packages
+Empty environment created at prefix: /scratch/yuqian_fu/micromamba/envs/auto-zcubaqpyrbpe
+error libmamba Could not lock non-existing path '/scratch/yuqian_fu/micromamba/pkgs'
+Transaction
+
+Prefix: /scratch/yuqian_fu/micromamba/envs/auto-zcubaqpyrbpe
+
+
+
+No specs added or removed.
+
+Package Version Build Channel Size
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+Install:
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+
++ _libgcc_mutex 0.1 conda_forge conda-forge
++ _openmp_mutex 4.5 2_kmp_llvm conda-forge
++ blas 2.116 mkl conda-forge
++ blas-devel 3.9.0 16_linux64_mkl conda-forge
++ brotli-python 1.1.0 py311hb755f60_1 conda-forge
++ bzip2 1.0.8 hd590300_5 conda-forge
++ ca-certificates 2024.6.2 hbcca054_0 conda-forge
++ certifi 2024.6.2 pyhd8ed1ab_0 conda-forge
++ cffi 1.16.0 py311hb3a22ac_0 conda-forge
++ charset-normalizer 3.3.2 pyhd8ed1ab_0 conda-forge
++ cuda-cudart 12.1.105 0 nvidia
++ cuda-cupti 12.1.105 0 /work/conda/cache/nvidia
++ cuda-libraries 12.1.0 0 nvidia
++ cuda-nvrtc 12.1.105 0 /work/conda/cache/nvidia
++ cuda-nvtx 12.1.105 0 nvidia
++ cuda-opencl 12.5.39 0 nvidia
++ cuda-runtime 12.1.0 0 nvidia
++ cuda-version 12.5 3 nvidia
++ ffmpeg 4.3 hf484d3e_0 /work/conda/cache/pytorch
++ filelock 3.15.4 pyhd8ed1ab_0 conda-forge
++ freetype 2.12.1 h267a509_2 conda-forge
++ gmp 6.3.0 hac33072_2 conda-forge
++ gmpy2 2.1.5 py311hc4f1f91_1 conda-forge
++ gnutls 3.6.13 h85f3911_1 /work/conda/cache/conda-forge
++ h2 4.1.0 pyhd8ed1ab_0 conda-forge
++ hpack 4.0.0 pyh9f0ad1d_0 conda-forge
++ hyperframe 6.0.1 pyhd8ed1ab_0 conda-forge
++ icu 73.2 h59595ed_0 /work/conda/cache/conda-forge
++ idna 3.7 pyhd8ed1ab_0 conda-forge
++ jinja2 3.1.4 pyhd8ed1ab_0 conda-forge
++ jpeg 9e h166bdaf_2 conda-forge
++ lame 3.100 h166bdaf_1003 conda-forge
++ lcms2 2.15 hfd0df8a_0 conda-forge
++ ld_impl_linux-64 2.40 hf3520f5_7 conda-forge
++ lerc 4.0.0 h27087fc_0 conda-forge
++ libblas 3.9.0 16_linux64_mkl conda-forge
++ libcblas 3.9.0 16_linux64_mkl conda-forge
++ libcublas 12.1.0.26 0 /work/conda/cache/nvidia
++ libcufft 11.0.2.4 0 /work/conda/cache/nvidia
++ libcufile 1.10.0.4 0 nvidia
++ libcurand 10.3.6.39 0 nvidia
++ libcusolver 11.4.4.55 0 /work/conda/cache/nvidia
++ libcusparse 12.0.2.55 0 /work/conda/cache/nvidia
++ libdeflate 1.17 h0b41bf4_0 conda-forge
++ libexpat 2.6.2 h59595ed_0 conda-forge
++ libffi 3.4.2 h7f98852_5 conda-forge
++ libgcc-ng 13.2.0 h77fa898_13 conda-forge
++ libgfortran-ng 13.2.0 h69a702a_13 conda-forge
++ libgfortran5 13.2.0 h3d2ce59_13 conda-forge
++ libhwloc 2.10.0 default_h5622ce7_1001 conda-forge
++ libiconv 1.17 hd590300_2 conda-forge
++ libjpeg-turbo 2.0.0 h9bf148f_0 pytorch
++ liblapack 3.9.0 16_linux64_mkl conda-forge
++ liblapacke 3.9.0 16_linux64_mkl conda-forge
++ libnpp 12.0.2.50 0 /work/conda/cache/nvidia
++ libnsl 2.0.1 hd590300_0 conda-forge
++ libnvjitlink 12.1.105 0 /work/conda/cache/nvidia
++ libnvjpeg 12.1.1.14 0 /work/conda/cache/nvidia
++ libpng 1.6.43 h2797004_0 conda-forge
++ libsqlite 3.46.0 hde9e2c9_0 conda-forge
++ libstdcxx-ng 13.2.0 hc0a3c3a_13 conda-forge
++ libtiff 4.5.0 h6adf6a1_2 conda-forge
++ libuuid 2.38.1 h0b41bf4_0 conda-forge
++ libwebp-base 1.4.0 hd590300_0 conda-forge
++ libxcb 1.13 h7f98852_1004 conda-forge
++ libxcrypt 4.4.36 hd590300_1 conda-forge
++ libxml2 2.12.7 hc051c1a_1 conda-forge
++ libzlib 1.2.13 h4ab18f5_6 conda-forge
++ llvm-openmp 15.0.7 h0cdce71_0 /work/conda/cache/conda-forge
++ markupsafe 2.1.5 py311h459d7ec_0 conda-forge
++ mkl 2022.1.0 h84fe81f_915 /work/conda/cache/conda-forge
++ mkl-devel 2022.1.0 ha770c72_916 conda-forge
++ mkl-include 2022.1.0 h84fe81f_915 conda-forge
++ mpc 1.3.1 hfe3b2da_0 conda-forge
++ mpfr 4.2.1 h9458935_1 conda-forge
++ mpmath 1.3.0 pyhd8ed1ab_0 conda-forge
++ ncurses 6.5 h59595ed_0 conda-forge
++ nettle 3.6 he412f7d_0 /work/conda/cache/conda-forge
++ networkx 3.3 pyhd8ed1ab_1 /work/conda/cache/conda-forge
++ numpy 2.0.0 py311h1461c94_0 conda-forge
++ openh264 2.1.1 h780b84a_0 /work/conda/cache/conda-forge
++ openjpeg 2.5.0 hfec8fc6_2 conda-forge
++ openssl 3.3.1 h4ab18f5_1 conda-forge
++ pandas 2.2.2 py311h14de704_1 conda-forge
++ pillow 9.4.0 py311h50def17_1 conda-forge
++ pip 24.0 pyhd8ed1ab_0 /work/conda/cache/conda-forge
++ pthread-stubs 0.4 h36c2ea0_1001 conda-forge
++ pycparser 2.22 pyhd8ed1ab_0 conda-forge
++ pysocks 1.7.1 pyha2e5f31_6 conda-forge
++ python 3.11.9 hb806964_0_cpython /work/conda/cache/conda-forge
++ python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge
++ python-tzdata 2024.1 pyhd8ed1ab_0 conda-forge
++ python_abi 3.11 4_cp311 conda-forge
++ pytorch 2.3.1 py3.11_cuda12.1_cudnn8.9.2_0 pytorch
++ pytorch-cuda 12.1 ha16c6d3_5 pytorch
++ pytorch-mutex 1.0 cuda pytorch
++ pytz 2024.1 pyhd8ed1ab_0 conda-forge
++ pyyaml 6.0.1 py311h459d7ec_1 conda-forge
++ readline 8.2 h8228510_1 conda-forge
++ requests 2.32.3 pyhd8ed1ab_0 conda-forge
++ setuptools 70.1.1 pyhd8ed1ab_0 conda-forge
++ six 1.16.0 pyh6c4a22f_0 conda-forge
++ sympy 1.12.1 pypyh2585a3b_103 conda-forge
++ tbb 2021.12.0 h297d8ca_1 conda-forge
++ tk 8.6.13 noxft_h4845f30_101 /work/conda/cache/conda-forge
++ torchtriton 2.3.1 py311 pytorch
++ torchvision 0.18.1 py311_cu121 pytorch
++ typing_extensions 4.12.2 pyha770c72_0 conda-forge
++ tzdata 2024a h0c530f3_0 conda-forge
++ urllib3 2.2.2 pyhd8ed1ab_1 conda-forge
++ wheel 0.43.0 pyhd8ed1ab_1 conda-forge
++ xorg-libxau 1.0.11 hd590300_0 conda-forge
++ xorg-libxdmcp 1.1.3 h516909a_0 conda-forge
++ xz 5.2.6 h166bdaf_0 conda-forge
++ yaml 0.2.5 h7f98852_2 conda-forge
++ zlib 1.2.13 h4ab18f5_6 conda-forge
++ zstandard 0.22.0 py311hb6f056b_1 conda-forge
++ zstd 1.5.6 ha6fb4c9_0 conda-forge
+
+Summary:
+
+Install: 118 packages
+
+Total download: 0 B
+
+─────────────────────────────────────────────────────────────────────────────────────────────────────────
+
+
+
+Transaction starting
+Linking libcublas-12.1.0.26-0
+Linking libcufft-11.0.2.4-0
+Linking libcusolver-11.4.4.55-0
+Linking libcusparse-12.0.2.55-0
+Linking libnpp-12.0.2.50-0
+Linking libnvjitlink-12.1.105-0
+Linking cuda-cudart-12.1.105-0
+Linking cuda-nvrtc-12.1.105-0
+Linking libnvjpeg-12.1.1.14-0
+Linking cuda-cupti-12.1.105-0
+Linking cuda-nvtx-12.1.105-0
+Linking pytorch-mutex-1.0-cuda
+Linking _libgcc_mutex-0.1-conda_forge
+Linking mkl-include-2022.1.0-h84fe81f_915
+Linking python_abi-3.11-4_cp311
+Linking ld_impl_linux-64-2.40-hf3520f5_7
+Linking ca-certificates-2024.6.2-hbcca054_0
+Linking libgcc-ng-13.2.0-h77fa898_13
+Linking libzlib-1.2.13-h4ab18f5_6
+Linking llvm-openmp-15.0.7-h0cdce71_0
+Linking _openmp_mutex-4.5-2_kmp_llvm
+Linking xorg-libxdmcp-1.1.3-h516909a_0
+Linking pthread-stubs-0.4-h36c2ea0_1001
+Linking xorg-libxau-1.0.11-hd590300_0
+Linking libwebp-base-1.4.0-hd590300_0
+Linking libdeflate-1.17-h0b41bf4_0
+Linking jpeg-9e-h166bdaf_2
+Linking libffi-3.4.2-h7f98852_5
+Linking tk-8.6.13-noxft_h4845f30_101
+Linking openssl-3.3.1-h4ab18f5_1
+Linking libxcrypt-4.4.36-hd590300_1
+Linking libsqlite-3.46.0-hde9e2c9_0
+Linking yaml-0.2.5-h7f98852_2
+Linking ncurses-6.5-h59595ed_0
+Linking libgfortran5-13.2.0-h3d2ce59_13
+Linking lame-3.100-h166bdaf_1003
+Linking nettle-3.6-he412f7d_0
+Linking zlib-1.2.13-h4ab18f5_6
+Linking libstdcxx-ng-13.2.0-hc0a3c3a_13
+Linking libiconv-1.17-hd590300_2
+Linking bzip2-1.0.8-hd590300_5
+Linking libpng-1.6.43-h2797004_0
+Linking xz-5.2.6-h166bdaf_0
+Linking libuuid-2.38.1-h0b41bf4_0
+Linking libnsl-2.0.1-hd590300_0
+Linking libexpat-2.6.2-h59595ed_0
+Linking libxcb-1.13-h7f98852_1004
+Linking readline-8.2-h8228510_1
+Linking libgfortran-ng-13.2.0-h69a702a_13
+Linking icu-73.2-h59595ed_0
+Linking zstd-1.5.6-ha6fb4c9_0
+Linking lerc-4.0.0-h27087fc_0
+Linking openh264-2.1.1-h780b84a_0
+Linking gnutls-3.6.13-h85f3911_1
+Linking gmp-6.3.0-hac33072_2
+Linking freetype-2.12.1-h267a509_2
+Linking libxml2-2.12.7-hc051c1a_1
+Linking libtiff-4.5.0-h6adf6a1_2
+Linking mpfr-4.2.1-h9458935_1
+Linking libhwloc-2.10.0-default_h5622ce7_1001
+Linking openjpeg-2.5.0-hfec8fc6_2
+Linking lcms2-2.15-hfd0df8a_0
+Linking mpc-1.3.1-hfe3b2da_0
+Linking tbb-2021.12.0-h297d8ca_1
+Linking mkl-2022.1.0-h84fe81f_915
+Linking mkl-devel-2022.1.0-ha770c72_916
+Linking libblas-3.9.0-16_linux64_mkl
+Linking liblapack-3.9.0-16_linux64_mkl
+Linking libcblas-3.9.0-16_linux64_mkl
+Linking liblapacke-3.9.0-16_linux64_mkl
+Linking blas-devel-3.9.0-16_linux64_mkl
+Linking blas-2.116-mkl
+Linking cuda-version-12.5-3
+Linking tzdata-2024a-h0c530f3_0
+Linking libjpeg-turbo-2.0.0-h9bf148f_0
+warning libmamba [libjpeg-turbo-2.0.0-h9bf148f_0] The following files were already present in the environment:
+- bin/cjpeg
+- bin/djpeg
+- bin/jpegtran
+- bin/rdjpgcom
+- bin/wrjpgcom
+- include/jconfig.h
+- include/jerror.h
+- include/jmorecfg.h
+- include/jpeglib.h
+- lib/libjpeg.a
+- lib/libjpeg.so
+- lib/pkgconfig/libjpeg.pc
+- share/man/man1/cjpeg.1
+- share/man/man1/djpeg.1
+- share/man/man1/jpegtran.1
+- share/man/man1/rdjpgcom.1
+- share/man/man1/wrjpgcom.1
+Linking ffmpeg-4.3-hf484d3e_0
+Linking libcurand-10.3.6.39-0
+Linking libcufile-1.10.0.4-0
+Linking cuda-opencl-12.5.39-0
+Linking cuda-libraries-12.1.0-0
+Linking cuda-runtime-12.1.0-0
+Linking python-3.11.9-hb806964_0_cpython
+Linking pytorch-cuda-12.1-ha16c6d3_5
+Linking wheel-0.43.0-pyhd8ed1ab_1
+Linking setuptools-70.1.1-pyhd8ed1ab_0
+Linking pip-24.0-pyhd8ed1ab_0
+Linking pycparser-2.22-pyhd8ed1ab_0
+Linking six-1.16.0-pyh6c4a22f_0
+Linking hyperframe-6.0.1-pyhd8ed1ab_0
+Linking pytz-2024.1-pyhd8ed1ab_0
+Linking python-tzdata-2024.1-pyhd8ed1ab_0
+Linking charset-normalizer-3.3.2-pyhd8ed1ab_0
+Linking hpack-4.0.0-pyh9f0ad1d_0
+Linking pysocks-1.7.1-pyha2e5f31_6
+Linking idna-3.7-pyhd8ed1ab_0
+Linking certifi-2024.6.2-pyhd8ed1ab_0
+Linking mpmath-1.3.0-pyhd8ed1ab_0
+Linking typing_extensions-4.12.2-pyha770c72_0
+Linking networkx-3.3-pyhd8ed1ab_1
+Linking filelock-3.15.4-pyhd8ed1ab_0
+Linking python-dateutil-2.9.0-pyhd8ed1ab_0
+Linking h2-4.1.0-pyhd8ed1ab_0
+Linking brotli-python-1.1.0-py311hb755f60_1
+Linking markupsafe-2.1.5-py311h459d7ec_0
+Linking gmpy2-2.1.5-py311hc4f1f91_1
+Linking pyyaml-6.0.1-py311h459d7ec_1
+Linking pillow-9.4.0-py311h50def17_1
+Linking numpy-2.0.0-py311h1461c94_0
+Linking cffi-1.16.0-py311hb3a22ac_0
+Linking pandas-2.2.2-py311h14de704_1
+Linking zstandard-0.22.0-py311hb6f056b_1
+Linking jinja2-3.1.4-pyhd8ed1ab_0
+Linking sympy-1.12.1-pypyh2585a3b_103
+Linking urllib3-2.2.2-pyhd8ed1ab_1
+Linking requests-2.32.3-pyhd8ed1ab_0
+Linking pytorch-2.3.1-py3.11_cuda12.1_cudnn8.9.2_0
+Linking torchtriton-2.3.1-py311
+Linking torchvision-0.18.1-py311_cu121
+
+Transaction finished
+
+To activate this environment, use:
+
+mamba activate auto-zcubaqpyrbpe
+
+Or to execute a single command in this environment, use:
+
+mamba run -n auto-zcubaqpyrbpe mycommand
+
+Installing pip packages
+WARNING: The candidate selected for download or install is a yanked version: 'opencv-python' candidate (version 4.5.5.62 at https://files.pythonhosted.org/packages/9d/98/36bfcbff30da27dd6922ed73ca7802c37d87f77daf4c569da3dcb87b4296/opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from https://pypi.org/simple/opencv-python/) (requires-python:>=3.6))
+Reason for being yanked: deprecated, use 4.5.5.64
+Traceback (most recent call last):
+File "/home/yuqian_fu/Projects/CausalStyleAdv/metatrain_CausalStyle_RN.py", line 124, in <module>
+base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/home/yuqian_fu/Projects/CausalStyleAdv/data/datamgr.py", line 137, in get_data_loader
+dataset = SetDataset( data_file , self.batch_size, transform )
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File "/home/yuqian_fu/Projects/CausalStyleAdv/data/dataset.py", line 62, in __init__
+with open(data_file, 'r') as f:
+^^^^^^^^^^^^^^^^^^^^
+FileNotFoundError: [Errno 2] No such file or directory: '/scratch/yuqian_fu/Data/CDFSL/miniImagenet/base.json'
+srun: error: gcpl4-eu-2: task 0: Exited with exit code 1
52729.log
ADDED
@@ -0,0 +1,114 @@
+Collecting h5py>=2.9.0
+Downloading h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (2.5 kB)
+Collecting ml-collections
+Downloading ml_collections-0.1.1.tar.gz (77 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 77.9/77.9 kB 8.5 MB/s eta 0:00:00
+Preparing metadata (setup.py): started
+Preparing metadata (setup.py): finished with status 'done'
+Collecting opencv-python==4.5.5.62
+Downloading opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (18 kB)
+Collecting scipy>=1.3.2
+Downloading scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (60 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 60.8/60.8 kB 10.4 MB/s eta 0:00:00
+Collecting tensorboard
+Downloading tensorboard-2.17.0-py3-none-any.whl.metadata (1.6 kB)
+Collecting tensorboardX>=1.4
+Downloading tensorboardX-2.6.2.2-py2.py3-none-any.whl.metadata (5.8 kB)
+Collecting timm
+Downloading timm-1.0.7-py3-none-any.whl.metadata (47 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 47.5/47.5 kB 16.7 MB/s eta 0:00:00
+Requirement already satisfied: numpy>=1.21.2 in ./lib/python3.11/site-packages (from opencv-python==4.5.5.62) (2.0.0)
+Collecting absl-py (from ml-collections)
+Downloading absl_py-2.1.0-py3-none-any.whl.metadata (2.3 kB)
+Requirement already satisfied: PyYAML in ./lib/python3.11/site-packages (from ml-collections) (6.0.1)
+Requirement already satisfied: six in ./lib/python3.11/site-packages (from ml-collections) (1.16.0)
+Collecting contextlib2 (from ml-collections)
+Downloading contextlib2-21.6.0-py2.py3-none-any.whl.metadata (4.1 kB)
+Collecting grpcio>=1.48.2 (from tensorboard)
+Downloading grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.3 kB)
+Collecting markdown>=2.6.8 (from tensorboard)
+Downloading Markdown-3.6-py3-none-any.whl.metadata (7.0 kB)
+Collecting protobuf!=4.24.0,<5.0.0,>=3.19.6 (from tensorboard)
+Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl.metadata (541 bytes)
+Requirement already satisfied: setuptools>=41.0.0 in ./lib/python3.11/site-packages (from tensorboard) (70.1.1)
+Collecting tensorboard-data-server<0.8.0,>=0.7.0 (from tensorboard)
+Downloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl.metadata (1.1 kB)
+Collecting werkzeug>=1.0.1 (from tensorboard)
+Downloading werkzeug-3.0.3-py3-none-any.whl.metadata (3.7 kB)
+Collecting packaging (from tensorboardX>=1.4)
+Downloading packaging-24.1-py3-none-any.whl.metadata (3.2 kB)
+Requirement already satisfied: torch in ./lib/python3.11/site-packages (from timm) (2.3.1)
+Requirement already satisfied: torchvision in ./lib/python3.11/site-packages (from timm) (0.18.1)
+Collecting huggingface_hub (from timm)
+Downloading huggingface_hub-0.23.4-py3-none-any.whl.metadata (12 kB)
+Collecting safetensors (from timm)
+Downloading safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.8 kB)
+Requirement already satisfied: MarkupSafe>=2.1.1 in ./lib/python3.11/site-packages (from werkzeug>=1.0.1->tensorboard) (2.1.5)
+Requirement already satisfied: filelock in ./lib/python3.11/site-packages (from huggingface_hub->timm) (3.15.4)
+Collecting fsspec>=2023.5.0 (from huggingface_hub->timm)
+Downloading fsspec-2024.6.0-py3-none-any.whl.metadata (11 kB)
+Requirement already satisfied: requests in ./lib/python3.11/site-packages (from huggingface_hub->timm) (2.32.3)
+Collecting tqdm>=4.42.1 (from huggingface_hub->timm)
+Downloading tqdm-4.66.4-py3-none-any.whl.metadata (57 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 57.6/57.6 kB 24.9 MB/s eta 0:00:00
+Requirement already satisfied: typing-extensions>=3.7.4.3 in ./lib/python3.11/site-packages (from huggingface_hub->timm) (4.12.2)
+Requirement already satisfied: sympy in ./lib/python3.11/site-packages (from torch->timm) (1.12.1)
+Requirement already satisfied: networkx in ./lib/python3.11/site-packages (from torch->timm) (3.3)
+Requirement already satisfied: jinja2 in ./lib/python3.11/site-packages (from torch->timm) (3.1.4)
+Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in ./lib/python3.11/site-packages (from torchvision->timm) (9.4.0)
+Requirement already satisfied: charset-normalizer<4,>=2 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (3.3.2)
+Requirement already satisfied: idna<4,>=2.5 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (3.7)
+Requirement already satisfied: urllib3<3,>=1.21.1 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (2.2.2)
+Requirement already satisfied: certifi>=2017.4.17 in ./lib/python3.11/site-packages (from requests->huggingface_hub->timm) (2024.6.2)
+Requirement already satisfied: mpmath<1.4.0,>=1.1.0 in ./lib/python3.11/site-packages (from sympy->torch->timm) (1.3.0)
+Downloading opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (60.4 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 60.4/60.4 MB 72.8 MB/s eta 0:00:00
+Downloading h5py-3.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.4/5.4 MB 209.0 MB/s eta 0:00:00
+Downloading scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (41.1 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 41.1/41.1 MB 115.1 MB/s eta 0:00:00
+Downloading tensorboard-2.17.0-py3-none-any.whl (5.5 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.5/5.5 MB 211.7 MB/s eta 0:00:00
+Downloading tensorboardX-2.6.2.2-py2.py3-none-any.whl (101 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 101.7/101.7 kB 38.7 MB/s eta 0:00:00
+Downloading timm-1.0.7-py3-none-any.whl (2.3 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.3/2.3 MB 181.5 MB/s eta 0:00:00
+Downloading absl_py-2.1.0-py3-none-any.whl (133 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 133.7/133.7 kB 52.8 MB/s eta 0:00:00
+Downloading grpcio-1.64.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.6 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.6/5.6 MB 212.4 MB/s eta 0:00:00
+Downloading Markdown-3.6-py3-none-any.whl (105 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 105.4/105.4 kB 40.3 MB/s eta 0:00:00
+Downloading protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl (294 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 294.6/294.6 kB 101.1 MB/s eta 0:00:00
+Downloading tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl (6.6 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.6/6.6 MB 214.7 MB/s eta 0:00:00
+Downloading werkzeug-3.0.3-py3-none-any.whl (227 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 227.3/227.3 kB 74.9 MB/s eta 0:00:00
+Downloading contextlib2-21.6.0-py2.py3-none-any.whl (13 kB)
+Downloading huggingface_hub-0.23.4-py3-none-any.whl (402 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 402.6/402.6 kB 109.9 MB/s eta 0:00:00
+Downloading packaging-24.1-py3-none-any.whl (53 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 54.0/54.0 kB 21.8 MB/s eta 0:00:00
+Downloading safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 170.9 MB/s eta 0:00:00
+Downloading fsspec-2024.6.0-py3-none-any.whl (176 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 176.9/176.9 kB 62.5 MB/s eta 0:00:00
+Downloading tqdm-4.66.4-py3-none-any.whl (78 kB)
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 78.3/78.3 kB 30.5 MB/s eta 0:00:00
+Building wheels for collected packages: ml-collections
+Building wheel for ml-collections (setup.py): started
+Building wheel for ml-collections (setup.py): finished with status 'done'
|
| 102 |
+
Created wheel for ml-collections: filename=ml_collections-0.1.1-py3-none-any.whl size=94508 sha256=2e320bb7bf02566bf671fd943ea8dfe7cb6c35a1fab523a080d4ab487706ca51
|
| 103 |
+
Stored in directory: /scratch/yuqian_fu/.cache/pip/wheels/28/82/ef/a6971b09a96519d55ce6efef66f0cbcdef2ae9cc1e6b41daf7
|
| 104 |
+
Successfully built ml-collections
|
| 105 |
+
Installing collected packages: werkzeug, tqdm, tensorboard-data-server, scipy, safetensors, protobuf, packaging, opencv-python, markdown, h5py, grpcio, fsspec, contextlib2, absl-py, tensorboardX, tensorboard, ml-collections, huggingface_hub, timm
|
| 106 |
+
Successfully installed absl-py-2.1.0 contextlib2-21.6.0 fsspec-2024.6.0 grpcio-1.64.1 h5py-3.11.0 huggingface_hub-0.23.4 markdown-3.6 ml-collections-0.1.1 opencv-python-4.5.5.62 packaging-24.1 protobuf-4.25.3 safetensors-0.4.3 scipy-1.14.0 tensorboard-2.17.0 tensorboard-data-server-0.7.2 tensorboardX-2.6.2.2 timm-1.0.7 tqdm-4.66.4 werkzeug-3.0.3
|
| 107 |
+
backbone: maml: False
|
| 108 |
+
hi this is causal style
|
| 109 |
+
set seed = 0
|
| 110 |
+
|
| 111 |
+
--- prepare dataloader ---
|
| 112 |
+
train with single seen domain miniImagenet
|
| 113 |
+
|
| 114 |
+
--- build model ---
|
Meta-causal/code-stage1-pipeline/56451.error
ADDED
|
@@ -0,0 +1,297 @@
| 1 |
+
Solving dependencies
|
| 2 |
+
Installing conda packages
|
| 3 |
+
Empty environment created at prefix: /scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem
|
| 4 |
+
error libmamba Could not lock non-existing path '/scratch/yuqian_fu/micromamba/pkgs'
|
| 5 |
+
Transaction
|
| 6 |
+
|
| 7 |
+
Prefix: /scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
No specs added or removed.
|
| 12 |
+
|
| 13 |
+
Package Version Build Channel Size
|
| 14 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 15 |
+
Install:
|
| 16 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 17 |
+
|
| 18 |
+
+ _libgcc_mutex 0.1 conda_forge conda-forge
|
| 19 |
+
+ _openmp_mutex 4.5 2_kmp_llvm conda-forge
|
| 20 |
+
+ blas 2.116 mkl conda-forge
|
| 21 |
+
+ blas-devel 3.9.0 16_linux64_mkl conda-forge
|
| 22 |
+
+ brotli-python 1.1.0 py311hb755f60_1 conda-forge
|
| 23 |
+
+ bzip2 1.0.8 hd590300_5 conda-forge
|
| 24 |
+
+ ca-certificates 2024.6.2 hbcca054_0 conda-forge
|
| 25 |
+
+ certifi 2024.6.2 pyhd8ed1ab_0 conda-forge
|
| 26 |
+
+ cffi 1.16.0 py311hb3a22ac_0 conda-forge
|
| 27 |
+
+ charset-normalizer 3.3.2 pyhd8ed1ab_0 conda-forge
|
| 28 |
+
+ click 8.1.7 unix_pyh707e725_0 conda-forge
|
| 29 |
+
+ cuda-cudart 12.1.105 0 nvidia
|
| 30 |
+
+ cuda-cupti 12.1.105 0 /work/conda/cache/nvidia
|
| 31 |
+
+ cuda-libraries 12.1.0 0 nvidia
|
| 32 |
+
+ cuda-nvrtc 12.1.105 0 /work/conda/cache/nvidia
|
| 33 |
+
+ cuda-nvtx 12.1.105 0 nvidia
|
| 34 |
+
+ cuda-opencl 12.5.39 0 nvidia
|
| 35 |
+
+ cuda-runtime 12.1.0 0 nvidia
|
| 36 |
+
+ cuda-version 12.5 3 nvidia
|
| 37 |
+
+ ffmpeg 4.3 hf484d3e_0 /work/conda/cache/pytorch
|
| 38 |
+
+ filelock 3.15.4 pyhd8ed1ab_0 conda-forge
|
| 39 |
+
+ freetype 2.12.1 h267a509_2 conda-forge
|
| 40 |
+
+ gmp 6.3.0 hac33072_2 conda-forge
|
| 41 |
+
+ gmpy2 2.1.5 py311hc4f1f91_1 conda-forge
|
| 42 |
+
+ gnutls 3.6.13 h85f3911_1 /work/conda/cache/conda-forge
|
| 43 |
+
+ h2 4.1.0 pyhd8ed1ab_0 conda-forge
|
| 44 |
+
+ hpack 4.0.0 pyh9f0ad1d_0 conda-forge
|
| 45 |
+
+ hyperframe 6.0.1 pyhd8ed1ab_0 conda-forge
|
| 46 |
+
+ icu 73.2 h59595ed_0 /work/conda/cache/conda-forge
|
| 47 |
+
+ idna 3.7 pyhd8ed1ab_0 conda-forge
|
| 48 |
+
+ jinja2 3.1.4 pyhd8ed1ab_0 conda-forge
|
| 49 |
+
+ jpeg 9e h166bdaf_2 conda-forge
|
| 50 |
+
+ lame 3.100 h166bdaf_1003 conda-forge
|
| 51 |
+
+ lcms2 2.15 hfd0df8a_0 conda-forge
|
| 52 |
+
+ ld_impl_linux-64 2.40 hf3520f5_7 conda-forge
|
| 53 |
+
+ lerc 4.0.0 h27087fc_0 conda-forge
|
| 54 |
+
+ libblas 3.9.0 16_linux64_mkl conda-forge
|
| 55 |
+
+ libcblas 3.9.0 16_linux64_mkl conda-forge
|
| 56 |
+
+ libcublas 12.1.0.26 0 /work/conda/cache/nvidia
|
| 57 |
+
+ libcufft 11.0.2.4 0 /work/conda/cache/nvidia
|
| 58 |
+
+ libcufile 1.10.0.4 0 nvidia
|
| 59 |
+
+ libcurand 10.3.6.39 0 nvidia
|
| 60 |
+
+ libcusolver 11.4.4.55 0 /work/conda/cache/nvidia
|
| 61 |
+
+ libcusparse 12.0.2.55 0 /work/conda/cache/nvidia
|
| 62 |
+
+ libdeflate 1.17 h0b41bf4_0 conda-forge
|
| 63 |
+
+ libexpat 2.6.2 h59595ed_0 conda-forge
|
| 64 |
+
+ libffi 3.4.2 h7f98852_5 conda-forge
|
| 65 |
+
+ libgcc-ng 14.1.0 h77fa898_0 conda-forge
|
| 66 |
+
+ libgfortran-ng 14.1.0 h69a702a_0 conda-forge
|
| 67 |
+
+ libgfortran5 14.1.0 hc5f4f2c_0 conda-forge
|
| 68 |
+
+ libhwloc 2.10.0 default_h5622ce7_1001 conda-forge
|
| 69 |
+
+ libiconv 1.17 hd590300_2 conda-forge
|
| 70 |
+
+ libjpeg-turbo 2.0.0 h9bf148f_0 pytorch
|
| 71 |
+
+ liblapack 3.9.0 16_linux64_mkl conda-forge
|
| 72 |
+
+ liblapacke 3.9.0 16_linux64_mkl conda-forge
|
| 73 |
+
+ libnpp 12.0.2.50 0 /work/conda/cache/nvidia
|
| 74 |
+
+ libnsl 2.0.1 hd590300_0 conda-forge
|
| 75 |
+
+ libnvjitlink 12.1.105 0 /work/conda/cache/nvidia
|
| 76 |
+
+ libnvjpeg 12.1.1.14 0 /work/conda/cache/nvidia
|
| 77 |
+
+ libpng 1.6.43 h2797004_0 conda-forge
|
| 78 |
+
+ libsqlite 3.46.0 hde9e2c9_0 conda-forge
|
| 79 |
+
+ libstdcxx-ng 14.1.0 hc0a3c3a_0 conda-forge
|
| 80 |
+
+ libtiff 4.5.0 h6adf6a1_2 conda-forge
|
| 81 |
+
+ libuuid 2.38.1 h0b41bf4_0 conda-forge
|
| 82 |
+
+ libwebp-base 1.4.0 hd590300_0 conda-forge
|
| 83 |
+
+ libxcb 1.13 h7f98852_1004 conda-forge
|
| 84 |
+
+ libxcrypt 4.4.36 hd590300_1 conda-forge
|
| 85 |
+
+ libxml2 2.12.7 hc051c1a_1 conda-forge
|
| 86 |
+
+ libzlib 1.2.13 h4ab18f5_6 conda-forge
|
| 87 |
+
+ llvm-openmp 15.0.7 h0cdce71_0 /work/conda/cache/conda-forge
|
| 88 |
+
+ markupsafe 2.1.5 py311h459d7ec_0 conda-forge
|
| 89 |
+
+ mkl 2022.1.0 h84fe81f_915 /work/conda/cache/conda-forge
|
| 90 |
+
+ mkl-devel 2022.1.0 ha770c72_916 conda-forge
|
| 91 |
+
+ mkl-include 2022.1.0 h84fe81f_915 conda-forge
|
| 92 |
+
+ mpc 1.3.1 hfe3b2da_0 conda-forge
|
| 93 |
+
+ mpfr 4.2.1 h9458935_1 conda-forge
|
| 94 |
+
+ mpmath 1.3.0 pyhd8ed1ab_0 conda-forge
|
| 95 |
+
+ ncurses 6.5 h59595ed_0 conda-forge
|
| 96 |
+
+ nettle 3.6 he412f7d_0 /work/conda/cache/conda-forge
|
| 97 |
+
+ networkx 3.3 pyhd8ed1ab_1 /work/conda/cache/conda-forge
|
| 98 |
+
+ numpy 2.0.0 py311h1461c94_0 conda-forge
|
| 99 |
+
+ openh264 2.1.1 h780b84a_0 /work/conda/cache/conda-forge
|
| 100 |
+
+ openjpeg 2.5.0 hfec8fc6_2 conda-forge
|
| 101 |
+
+ openssl 3.3.1 h4ab18f5_1 conda-forge
|
| 102 |
+
+ pandas 2.2.2 py311h14de704_1 conda-forge
|
| 103 |
+
+ pillow 9.4.0 py311h50def17_1 conda-forge
|
| 104 |
+
+ pip 24.0 pyhd8ed1ab_0 /work/conda/cache/conda-forge
|
| 105 |
+
+ pthread-stubs 0.4 h36c2ea0_1001 conda-forge
|
| 106 |
+
+ pycparser 2.22 pyhd8ed1ab_0 conda-forge
|
| 107 |
+
+ pysocks 1.7.1 pyha2e5f31_6 conda-forge
|
| 108 |
+
+ python 3.11.9 hb806964_0_cpython /work/conda/cache/conda-forge
|
| 109 |
+
+ python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge
|
| 110 |
+
+ python-tzdata 2024.1 pyhd8ed1ab_0 conda-forge
|
| 111 |
+
+ python_abi 3.11 4_cp311 conda-forge
|
| 112 |
+
+ pytorch 2.3.1 py3.11_cuda12.1_cudnn8.9.2_0 pytorch
|
| 113 |
+
+ pytorch-cuda 12.1 ha16c6d3_5 pytorch
|
| 114 |
+
+ pytorch-mutex 1.0 cuda pytorch
|
| 115 |
+
+ pytz 2024.1 pyhd8ed1ab_0 conda-forge
|
| 116 |
+
+ pyyaml 6.0.1 py311h459d7ec_1 conda-forge
|
| 117 |
+
+ readline 8.2 h8228510_1 conda-forge
|
| 118 |
+
+ requests 2.32.3 pyhd8ed1ab_0 conda-forge
|
| 119 |
+
+ setuptools 70.1.1 pyhd8ed1ab_0 conda-forge
|
| 120 |
+
+ six 1.16.0 pyh6c4a22f_0 conda-forge
|
| 121 |
+
+ sympy 1.12.1 pypyh2585a3b_103 conda-forge
|
| 122 |
+
+ tbb 2021.12.0 h297d8ca_1 conda-forge
|
| 123 |
+
+ tk 8.6.13 noxft_h4845f30_101 /work/conda/cache/conda-forge
|
| 124 |
+
+ torchtriton 2.3.1 py311 pytorch
|
| 125 |
+
+ torchvision 0.18.1 py311_cu121 pytorch
|
| 126 |
+
+ typing_extensions 4.12.2 pyha770c72_0 conda-forge
|
| 127 |
+
+ tzdata 2024a h0c530f3_0 conda-forge
|
| 128 |
+
+ urllib3 2.2.2 pyhd8ed1ab_1 conda-forge
|
| 129 |
+
+ wheel 0.43.0 pyhd8ed1ab_1 conda-forge
|
| 130 |
+
+ xorg-libxau 1.0.11 hd590300_0 conda-forge
|
| 131 |
+
+ xorg-libxdmcp 1.1.3 h516909a_0 conda-forge
|
| 132 |
+
+ xz 5.2.6 h166bdaf_0 conda-forge
|
| 133 |
+
+ yaml 0.2.5 h7f98852_2 conda-forge
|
| 134 |
+
+ zlib 1.2.13 h4ab18f5_6 conda-forge
|
| 135 |
+
+ zstandard 0.22.0 py311hb6f056b_1 conda-forge
|
| 136 |
+
+ zstd 1.5.6 ha6fb4c9_0 conda-forge
|
| 137 |
+
|
| 138 |
+
Summary:
|
| 139 |
+
|
| 140 |
+
Install: 119 packages
|
| 141 |
+
|
| 142 |
+
Total download: 0 B
|
| 143 |
+
|
| 144 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
Transaction starting
|
| 149 |
+
Linking libcublas-12.1.0.26-0
|
| 150 |
+
Linking libcufft-11.0.2.4-0
|
| 151 |
+
Linking libcusolver-11.4.4.55-0
|
| 152 |
+
Linking libcusparse-12.0.2.55-0
|
| 153 |
+
Linking libnpp-12.0.2.50-0
|
| 154 |
+
Linking libnvjitlink-12.1.105-0
|
| 155 |
+
Linking cuda-cudart-12.1.105-0
|
| 156 |
+
Linking cuda-nvrtc-12.1.105-0
|
| 157 |
+
Linking libnvjpeg-12.1.1.14-0
|
| 158 |
+
Linking cuda-cupti-12.1.105-0
|
| 159 |
+
Linking cuda-nvtx-12.1.105-0
|
| 160 |
+
Linking pytorch-mutex-1.0-cuda
|
| 161 |
+
Linking _libgcc_mutex-0.1-conda_forge
|
| 162 |
+
Linking mkl-include-2022.1.0-h84fe81f_915
|
| 163 |
+
Linking python_abi-3.11-4_cp311
|
| 164 |
+
Linking ld_impl_linux-64-2.40-hf3520f5_7
|
| 165 |
+
Linking ca-certificates-2024.6.2-hbcca054_0
|
| 166 |
+
Linking libgcc-ng-14.1.0-h77fa898_0
|
| 167 |
+
Linking libzlib-1.2.13-h4ab18f5_6
|
| 168 |
+
Linking llvm-openmp-15.0.7-h0cdce71_0
|
| 169 |
+
Linking _openmp_mutex-4.5-2_kmp_llvm
|
| 170 |
+
Linking xorg-libxdmcp-1.1.3-h516909a_0
|
| 171 |
+
Linking pthread-stubs-0.4-h36c2ea0_1001
|
| 172 |
+
Linking xorg-libxau-1.0.11-hd590300_0
|
| 173 |
+
Linking libwebp-base-1.4.0-hd590300_0
|
| 174 |
+
Linking libdeflate-1.17-h0b41bf4_0
|
| 175 |
+
Linking jpeg-9e-h166bdaf_2
|
| 176 |
+
Linking libffi-3.4.2-h7f98852_5
|
| 177 |
+
Linking tk-8.6.13-noxft_h4845f30_101
|
| 178 |
+
Linking openssl-3.3.1-h4ab18f5_1
|
| 179 |
+
Linking libxcrypt-4.4.36-hd590300_1
|
| 180 |
+
Linking libsqlite-3.46.0-hde9e2c9_0
|
| 181 |
+
Linking yaml-0.2.5-h7f98852_2
|
| 182 |
+
Linking ncurses-6.5-h59595ed_0
|
| 183 |
+
Linking libgfortran5-14.1.0-hc5f4f2c_0
|
| 184 |
+
Linking lame-3.100-h166bdaf_1003
|
| 185 |
+
Linking nettle-3.6-he412f7d_0
|
| 186 |
+
Linking zlib-1.2.13-h4ab18f5_6
|
| 187 |
+
Linking libstdcxx-ng-14.1.0-hc0a3c3a_0
|
| 188 |
+
Linking libiconv-1.17-hd590300_2
|
| 189 |
+
Linking bzip2-1.0.8-hd590300_5
|
| 190 |
+
Linking libpng-1.6.43-h2797004_0
|
| 191 |
+
Linking xz-5.2.6-h166bdaf_0
|
| 192 |
+
Linking libuuid-2.38.1-h0b41bf4_0
|
| 193 |
+
Linking libnsl-2.0.1-hd590300_0
|
| 194 |
+
Linking libexpat-2.6.2-h59595ed_0
|
| 195 |
+
Linking libxcb-1.13-h7f98852_1004
|
| 196 |
+
Linking readline-8.2-h8228510_1
|
| 197 |
+
Linking libgfortran-ng-14.1.0-h69a702a_0
|
| 198 |
+
Linking icu-73.2-h59595ed_0
|
| 199 |
+
Linking zstd-1.5.6-ha6fb4c9_0
|
| 200 |
+
Linking lerc-4.0.0-h27087fc_0
|
| 201 |
+
Linking openh264-2.1.1-h780b84a_0
|
| 202 |
+
Linking gnutls-3.6.13-h85f3911_1
|
| 203 |
+
Linking gmp-6.3.0-hac33072_2
|
| 204 |
+
Linking freetype-2.12.1-h267a509_2
|
| 205 |
+
Linking libxml2-2.12.7-hc051c1a_1
|
| 206 |
+
Linking libtiff-4.5.0-h6adf6a1_2
|
| 207 |
+
Linking mpfr-4.2.1-h9458935_1
|
| 208 |
+
Linking libhwloc-2.10.0-default_h5622ce7_1001
|
| 209 |
+
Linking openjpeg-2.5.0-hfec8fc6_2
|
| 210 |
+
Linking lcms2-2.15-hfd0df8a_0
|
| 211 |
+
Linking mpc-1.3.1-hfe3b2da_0
|
| 212 |
+
Linking tbb-2021.12.0-h297d8ca_1
|
| 213 |
+
Linking mkl-2022.1.0-h84fe81f_915
|
| 214 |
+
Linking mkl-devel-2022.1.0-ha770c72_916
|
| 215 |
+
Linking libblas-3.9.0-16_linux64_mkl
|
| 216 |
+
Linking liblapack-3.9.0-16_linux64_mkl
|
| 217 |
+
Linking libcblas-3.9.0-16_linux64_mkl
|
| 218 |
+
Linking liblapacke-3.9.0-16_linux64_mkl
|
| 219 |
+
Linking blas-devel-3.9.0-16_linux64_mkl
|
| 220 |
+
Linking blas-2.116-mkl
|
| 221 |
+
Linking cuda-version-12.5-3
|
| 222 |
+
Linking tzdata-2024a-h0c530f3_0
|
| 223 |
+
Linking libjpeg-turbo-2.0.0-h9bf148f_0
|
| 224 |
+
warning libmamba [libjpeg-turbo-2.0.0-h9bf148f_0] The following files were already present in the environment:
|
| 225 |
+
- bin/cjpeg
|
| 226 |
+
- bin/djpeg
|
| 227 |
+
- bin/jpegtran
|
| 228 |
+
- bin/rdjpgcom
|
| 229 |
+
- bin/wrjpgcom
|
| 230 |
+
- include/jconfig.h
|
| 231 |
+
- include/jerror.h
|
| 232 |
+
- include/jmorecfg.h
|
| 233 |
+
- include/jpeglib.h
|
| 234 |
+
- lib/libjpeg.a
|
| 235 |
+
- lib/libjpeg.so
|
| 236 |
+
- lib/pkgconfig/libjpeg.pc
|
| 237 |
+
- share/man/man1/cjpeg.1
|
| 238 |
+
- share/man/man1/djpeg.1
|
| 239 |
+
- share/man/man1/jpegtran.1
|
| 240 |
+
- share/man/man1/rdjpgcom.1
|
| 241 |
+
- share/man/man1/wrjpgcom.1
|
| 242 |
+
Linking ffmpeg-4.3-hf484d3e_0
|
| 243 |
+
Linking libcurand-10.3.6.39-0
|
| 244 |
+
Linking libcufile-1.10.0.4-0
|
| 245 |
+
Linking cuda-opencl-12.5.39-0
|
| 246 |
+
Linking cuda-libraries-12.1.0-0
|
| 247 |
+
Linking cuda-runtime-12.1.0-0
|
| 248 |
+
Linking python-3.11.9-hb806964_0_cpython
|
| 249 |
+
Linking pytorch-cuda-12.1-ha16c6d3_5
|
| 250 |
+
Linking wheel-0.43.0-pyhd8ed1ab_1
|
| 251 |
+
Linking setuptools-70.1.1-pyhd8ed1ab_0
|
| 252 |
+
Linking pip-24.0-pyhd8ed1ab_0
|
| 253 |
+
Linking pycparser-2.22-pyhd8ed1ab_0
|
| 254 |
+
Linking six-1.16.0-pyh6c4a22f_0
|
| 255 |
+
Linking hyperframe-6.0.1-pyhd8ed1ab_0
|
| 256 |
+
Linking pytz-2024.1-pyhd8ed1ab_0
|
| 257 |
+
Linking python-tzdata-2024.1-pyhd8ed1ab_0
|
| 258 |
+
Linking charset-normalizer-3.3.2-pyhd8ed1ab_0
|
| 259 |
+
Linking hpack-4.0.0-pyh9f0ad1d_0
|
| 260 |
+
Linking pysocks-1.7.1-pyha2e5f31_6
|
| 261 |
+
Linking idna-3.7-pyhd8ed1ab_0
|
| 262 |
+
Linking certifi-2024.6.2-pyhd8ed1ab_0
|
| 263 |
+
Linking mpmath-1.3.0-pyhd8ed1ab_0
|
| 264 |
+
Linking typing_extensions-4.12.2-pyha770c72_0
|
| 265 |
+
Linking networkx-3.3-pyhd8ed1ab_1
|
| 266 |
+
Linking filelock-3.15.4-pyhd8ed1ab_0
|
| 267 |
+
Linking click-8.1.7-unix_pyh707e725_0
|
| 268 |
+
Linking python-dateutil-2.9.0-pyhd8ed1ab_0
|
| 269 |
+
Linking h2-4.1.0-pyhd8ed1ab_0
|
| 270 |
+
Linking brotli-python-1.1.0-py311hb755f60_1
|
| 271 |
+
Linking markupsafe-2.1.5-py311h459d7ec_0
|
| 272 |
+
Linking gmpy2-2.1.5-py311hc4f1f91_1
|
| 273 |
+
Linking pyyaml-6.0.1-py311h459d7ec_1
|
| 274 |
+
Linking pillow-9.4.0-py311h50def17_1
|
| 275 |
+
Linking numpy-2.0.0-py311h1461c94_0
|
| 276 |
+
Linking cffi-1.16.0-py311hb3a22ac_0
|
| 277 |
+
Linking pandas-2.2.2-py311h14de704_1
|
| 278 |
+
Linking zstandard-0.22.0-py311hb6f056b_1
|
| 279 |
+
Linking jinja2-3.1.4-pyhd8ed1ab_0
|
| 280 |
+
Linking sympy-1.12.1-pypyh2585a3b_103
|
| 281 |
+
Linking urllib3-2.2.2-pyhd8ed1ab_1
|
| 282 |
+
Linking requests-2.32.3-pyhd8ed1ab_0
|
| 283 |
+
Linking pytorch-2.3.1-py3.11_cuda12.1_cudnn8.9.2_0
|
| 284 |
+
Linking torchtriton-2.3.1-py311
|
| 285 |
+
Linking torchvision-0.18.1-py311_cu121
|
| 286 |
+
|
| 287 |
+
Transaction finished
|
| 288 |
+
|
| 289 |
+
To activate this environment, use:
|
| 290 |
+
|
| 291 |
+
mamba activate auto-uvapqvk3mmem
|
| 292 |
+
|
| 293 |
+
Or to execute a single command in this environment, use:
|
| 294 |
+
|
| 295 |
+
mamba run -n auto-uvapqvk3mmem mycommand
|
| 296 |
+
|
| 297 |
+
slurmstepd: error: *** JOB 56451 ON gcpl4-eu-1 CANCELLED AT 2024-07-03T18:51:16 ***
|
Meta-causal/code-stage1-pipeline/56451.log
ADDED
|
File without changes
|
Meta-causal/code-stage1-pipeline/56452.error
ADDED
|
@@ -0,0 +1,302 @@
| 1 |
+
Solving dependencies
|
| 2 |
+
Installing conda packages
|
| 3 |
+
Empty environment created at prefix: /scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem
|
| 4 |
+
Transaction
|
| 5 |
+
|
| 6 |
+
Prefix: /scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
No specs added or removed.
|
| 11 |
+
|
| 12 |
+
Package Version Build Channel Size
|
| 13 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 14 |
+
Install:
|
| 15 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 16 |
+
|
| 17 |
+
+ _libgcc_mutex 0.1 conda_forge conda-forge
|
| 18 |
+
+ _openmp_mutex 4.5 2_kmp_llvm conda-forge
|
| 19 |
+
+ blas 2.116 mkl conda-forge
|
| 20 |
+
+ blas-devel 3.9.0 16_linux64_mkl conda-forge
|
| 21 |
+
+ brotli-python 1.1.0 py311hb755f60_1 conda-forge
|
| 22 |
+
+ bzip2 1.0.8 hd590300_5 conda-forge
|
| 23 |
+
+ ca-certificates 2024.6.2 hbcca054_0 conda-forge
|
| 24 |
+
+ certifi 2024.6.2 pyhd8ed1ab_0 conda-forge
|
| 25 |
+
+ cffi 1.16.0 py311hb3a22ac_0 conda-forge
|
| 26 |
+
+ charset-normalizer 3.3.2 pyhd8ed1ab_0 conda-forge
|
| 27 |
+
+ click 8.1.7 unix_pyh707e725_0 conda-forge
|
| 28 |
+
+ cuda-cudart 12.1.105 0 nvidia
|
| 29 |
+
+ cuda-cupti 12.1.105 0 /work/conda/cache/nvidia
|
| 30 |
+
+ cuda-libraries 12.1.0 0 nvidia
|
| 31 |
+
+ cuda-nvrtc 12.1.105 0 /work/conda/cache/nvidia
|
| 32 |
+
+ cuda-nvtx 12.1.105 0 nvidia
|
| 33 |
+
+ cuda-opencl 12.5.39 0 nvidia
|
| 34 |
+
+ cuda-runtime 12.1.0 0 nvidia
|
| 35 |
+
+ cuda-version 12.5 3 nvidia
|
| 36 |
+
+ ffmpeg 4.3 hf484d3e_0 /work/conda/cache/pytorch
|
| 37 |
+
+ filelock 3.15.4 pyhd8ed1ab_0 conda-forge
|
| 38 |
+
+ freetype 2.12.1 h267a509_2 conda-forge
|
| 39 |
+
+ gmp 6.3.0 hac33072_2 conda-forge
|
| 40 |
+
+ gmpy2 2.1.5 py311hc4f1f91_1 conda-forge
|
| 41 |
+
+ gnutls 3.6.13 h85f3911_1 /work/conda/cache/conda-forge
|
| 42 |
+
+ h2 4.1.0 pyhd8ed1ab_0 conda-forge
|
| 43 |
+
+ hpack 4.0.0 pyh9f0ad1d_0 conda-forge
|
| 44 |
+
+ hyperframe 6.0.1 pyhd8ed1ab_0 conda-forge
|
| 45 |
+
+ icu 73.2 h59595ed_0 /work/conda/cache/conda-forge
|
| 46 |
+
+ idna 3.7 pyhd8ed1ab_0 conda-forge
|
| 47 |
+
+ jinja2 3.1.4 pyhd8ed1ab_0 conda-forge
|
| 48 |
+
+ jpeg 9e h166bdaf_2 conda-forge
|
| 49 |
+
+ lame 3.100 h166bdaf_1003 conda-forge
|
| 50 |
+
+ lcms2 2.15 hfd0df8a_0 conda-forge
|
| 51 |
+
+ ld_impl_linux-64 2.40 hf3520f5_7 conda-forge
|
| 52 |
+
+ lerc 4.0.0 h27087fc_0 conda-forge
|
| 53 |
+
+ libblas 3.9.0 16_linux64_mkl conda-forge
|
| 54 |
+
+ libcblas 3.9.0 16_linux64_mkl conda-forge
|
| 55 |
+
+ libcublas 12.1.0.26 0 /work/conda/cache/nvidia
|
| 56 |
+
+ libcufft 11.0.2.4 0 /work/conda/cache/nvidia
|
| 57 |
+
+ libcufile 1.10.0.4 0 nvidia
|
| 58 |
+
+ libcurand 10.3.6.39 0 nvidia
|
| 59 |
+
+ libcusolver 11.4.4.55 0 /work/conda/cache/nvidia
|
| 60 |
+
+ libcusparse 12.0.2.55 0 /work/conda/cache/nvidia
|
| 61 |
+
+ libdeflate 1.17 h0b41bf4_0 conda-forge
|
| 62 |
+
+ libexpat 2.6.2 h59595ed_0 conda-forge
|
| 63 |
+
+ libffi 3.4.2 h7f98852_5 conda-forge
|
| 64 |
+
+ libgcc-ng 14.1.0 h77fa898_0 conda-forge
|
| 65 |
+
+ libgfortran-ng 14.1.0 h69a702a_0 conda-forge
|
| 66 |
+
+ libgfortran5 14.1.0 hc5f4f2c_0 conda-forge
|
| 67 |
+
+ libhwloc 2.10.0 default_h5622ce7_1001 conda-forge
|
| 68 |
+
+ libiconv 1.17 hd590300_2 conda-forge
|
| 69 |
+
+ libjpeg-turbo 2.0.0 h9bf148f_0 pytorch
|
| 70 |
+
+ liblapack 3.9.0 16_linux64_mkl conda-forge
|
| 71 |
+
+ liblapacke 3.9.0 16_linux64_mkl conda-forge
|
| 72 |
+
+ libnpp 12.0.2.50 0 /work/conda/cache/nvidia
|
| 73 |
+
+ libnsl 2.0.1 hd590300_0 conda-forge
|
| 74 |
+
+ libnvjitlink 12.1.105 0 /work/conda/cache/nvidia
|
| 75 |
+
+ libnvjpeg 12.1.1.14 0 /work/conda/cache/nvidia
|
| 76 |
+
+ libpng 1.6.43 h2797004_0 conda-forge
|
| 77 |
+
+ libsqlite 3.46.0 hde9e2c9_0 conda-forge
|
| 78 |
+
+ libstdcxx-ng 14.1.0 hc0a3c3a_0 conda-forge
|
| 79 |
+
+ libtiff 4.5.0 h6adf6a1_2 conda-forge
|
| 80 |
+
+ libuuid 2.38.1 h0b41bf4_0 conda-forge
|
| 81 |
+
+ libwebp-base 1.4.0 hd590300_0 conda-forge
|
| 82 |
+
+ libxcb 1.13 h7f98852_1004 conda-forge
|
| 83 |
+
+ libxcrypt 4.4.36 hd590300_1 conda-forge
|
| 84 |
+
+ libxml2 2.12.7 hc051c1a_1 conda-forge
|
| 85 |
+
+ libzlib 1.2.13 h4ab18f5_6 conda-forge
|
| 86 |
+
+ llvm-openmp 15.0.7 h0cdce71_0 /work/conda/cache/conda-forge
|
| 87 |
+
+ markupsafe 2.1.5 py311h459d7ec_0 conda-forge
|
| 88 |
+
+ mkl 2022.1.0 h84fe81f_915 /work/conda/cache/conda-forge
|
| 89 |
+
+ mkl-devel 2022.1.0 ha770c72_916 conda-forge
|
| 90 |
+
+ mkl-include 2022.1.0 h84fe81f_915 conda-forge
|
| 91 |
+
+ mpc 1.3.1 hfe3b2da_0 conda-forge
|
| 92 |
+
+ mpfr 4.2.1 h9458935_1 conda-forge
|
| 93 |
+
+ mpmath 1.3.0 pyhd8ed1ab_0 conda-forge
|
| 94 |
+
+ ncurses 6.5 h59595ed_0 conda-forge
|
| 95 |
+
+ nettle 3.6 he412f7d_0 /work/conda/cache/conda-forge
|
| 96 |
+
+ networkx 3.3 pyhd8ed1ab_1 /work/conda/cache/conda-forge
|
| 97 |
+
+ numpy 2.0.0 py311h1461c94_0 conda-forge
|
| 98 |
+
+ openh264 2.1.1 h780b84a_0 /work/conda/cache/conda-forge
|
| 99 |
+
+ openjpeg 2.5.0 hfec8fc6_2 conda-forge
|
| 100 |
+
+ openssl 3.3.1 h4ab18f5_1 conda-forge
|
| 101 |
+
+ pandas 2.2.2 py311h14de704_1 conda-forge
|
| 102 |
+
+ pillow 9.4.0 py311h50def17_1 conda-forge
|
| 103 |
+
+ pip 24.0 pyhd8ed1ab_0 /work/conda/cache/conda-forge
|
| 104 |
+
+ pthread-stubs 0.4 h36c2ea0_1001 conda-forge
|
| 105 |
+
+ pycparser 2.22 pyhd8ed1ab_0 conda-forge
|
| 106 |
+
+ pysocks 1.7.1 pyha2e5f31_6 conda-forge
|
| 107 |
+
+ python 3.11.9 hb806964_0_cpython /work/conda/cache/conda-forge
|
| 108 |
+
+ python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge
|
| 109 |
+
+ python-tzdata 2024.1 pyhd8ed1ab_0 conda-forge
|
| 110 |
+
+ python_abi 3.11 4_cp311 conda-forge
|
| 111 |
+
+ pytorch 2.3.1 py3.11_cuda12.1_cudnn8.9.2_0 pytorch
|
| 112 |
+
+ pytorch-cuda 12.1 ha16c6d3_5 pytorch
|
| 113 |
+
+ pytorch-mutex 1.0 cuda pytorch
|
| 114 |
+
+ pytz 2024.1 pyhd8ed1ab_0 conda-forge
|
| 115 |
+
+ pyyaml 6.0.1 py311h459d7ec_1 conda-forge
|
| 116 |
+
+ readline 8.2 h8228510_1 conda-forge
|
| 117 |
+
+ requests 2.32.3 pyhd8ed1ab_0 conda-forge
|
| 118 |
+
+ setuptools 70.1.1 pyhd8ed1ab_0 conda-forge
|
| 119 |
+
+ six 1.16.0 pyh6c4a22f_0 conda-forge
|
| 120 |
+
+ sympy 1.12.1 pypyh2585a3b_103 conda-forge
|
| 121 |
+
+ tbb 2021.12.0 h297d8ca_1 conda-forge
|
| 122 |
+
+ tk 8.6.13 noxft_h4845f30_101 /work/conda/cache/conda-forge
|
| 123 |
+
+ torchtriton 2.3.1 py311 pytorch
|
| 124 |
+
+ torchvision 0.18.1 py311_cu121 pytorch
|
| 125 |
+
+ typing_extensions 4.12.2 pyha770c72_0 conda-forge
|
| 126 |
+
+ tzdata 2024a h0c530f3_0 conda-forge
|
| 127 |
+
+ urllib3 2.2.2 pyhd8ed1ab_1 conda-forge
|
| 128 |
+
+ wheel 0.43.0 pyhd8ed1ab_1 conda-forge
|
| 129 |
+
+ xorg-libxau 1.0.11 hd590300_0 conda-forge
|
| 130 |
+
+ xorg-libxdmcp 1.1.3 h516909a_0 conda-forge
|
| 131 |
+
+ xz 5.2.6 h166bdaf_0 conda-forge
|
| 132 |
+
+ yaml 0.2.5 h7f98852_2 conda-forge
|
| 133 |
+
+ zlib 1.2.13 h4ab18f5_6 conda-forge
|
| 134 |
+
+ zstandard 0.22.0 py311hb6f056b_1 conda-forge
|
| 135 |
+
+ zstd 1.5.6 ha6fb4c9_0 conda-forge
|
| 136 |
+
|
| 137 |
+
Summary:
|
| 138 |
+
|
| 139 |
+
Install: 119 packages
|
| 140 |
+
|
| 141 |
+
Total download: 0 B
|
| 142 |
+
|
| 143 |
+
─────────────────────────────────────────────────────────────────────────────────────────────────────────
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
Transaction starting
|
| 148 |
+
Linking libcublas-12.1.0.26-0
|
| 149 |
+
Linking libcufft-11.0.2.4-0
|
| 150 |
+
Linking libcusolver-11.4.4.55-0
|
| 151 |
+
Linking libcusparse-12.0.2.55-0
|
| 152 |
+
Linking libnpp-12.0.2.50-0
|
| 153 |
+
Linking libnvjitlink-12.1.105-0
|
| 154 |
+
Linking cuda-cudart-12.1.105-0
|
| 155 |
+
Linking cuda-nvrtc-12.1.105-0
|
| 156 |
+
Linking libnvjpeg-12.1.1.14-0
|
| 157 |
+
Linking cuda-cupti-12.1.105-0
|
| 158 |
+
Linking cuda-nvtx-12.1.105-0
|
| 159 |
+
Linking pytorch-mutex-1.0-cuda
|
| 160 |
+
Linking _libgcc_mutex-0.1-conda_forge
|
| 161 |
+
Linking mkl-include-2022.1.0-h84fe81f_915
|
| 162 |
+
Linking python_abi-3.11-4_cp311
|
| 163 |
+
Linking ld_impl_linux-64-2.40-hf3520f5_7
|
| 164 |
+
Linking ca-certificates-2024.6.2-hbcca054_0
|
| 165 |
+
Linking libgcc-ng-14.1.0-h77fa898_0
|
| 166 |
+
Linking libzlib-1.2.13-h4ab18f5_6
|
| 167 |
+
Linking llvm-openmp-15.0.7-h0cdce71_0
|
| 168 |
+
Linking _openmp_mutex-4.5-2_kmp_llvm
|
| 169 |
+
Linking xorg-libxdmcp-1.1.3-h516909a_0
|
| 170 |
+
Linking pthread-stubs-0.4-h36c2ea0_1001
|
| 171 |
+
Linking xorg-libxau-1.0.11-hd590300_0
|
| 172 |
+
Linking libwebp-base-1.4.0-hd590300_0
|
| 173 |
+
Linking libdeflate-1.17-h0b41bf4_0
|
| 174 |
+
Linking jpeg-9e-h166bdaf_2
|
| 175 |
+
Linking libffi-3.4.2-h7f98852_5
|
| 176 |
+
Linking tk-8.6.13-noxft_h4845f30_101
|
| 177 |
+
Linking openssl-3.3.1-h4ab18f5_1
|
| 178 |
+
Linking libxcrypt-4.4.36-hd590300_1
|
| 179 |
+
Linking libsqlite-3.46.0-hde9e2c9_0
|
| 180 |
+
Linking yaml-0.2.5-h7f98852_2
|
| 181 |
+
Linking ncurses-6.5-h59595ed_0
|
| 182 |
+
Linking libgfortran5-14.1.0-hc5f4f2c_0
|
| 183 |
+
Linking lame-3.100-h166bdaf_1003
|
| 184 |
+
Linking nettle-3.6-he412f7d_0
|
| 185 |
+
Linking zlib-1.2.13-h4ab18f5_6
|
| 186 |
+
Linking libstdcxx-ng-14.1.0-hc0a3c3a_0
|
| 187 |
+
Linking libiconv-1.17-hd590300_2
|
| 188 |
+
Linking bzip2-1.0.8-hd590300_5
|
| 189 |
+
Linking libpng-1.6.43-h2797004_0
|
| 190 |
+
Linking xz-5.2.6-h166bdaf_0
|
| 191 |
+
Linking libuuid-2.38.1-h0b41bf4_0
|
| 192 |
+
Linking libnsl-2.0.1-hd590300_0
|
| 193 |
+
Linking libexpat-2.6.2-h59595ed_0
|
| 194 |
+
Linking libxcb-1.13-h7f98852_1004
|
| 195 |
+
Linking readline-8.2-h8228510_1
|
| 196 |
+
Linking libgfortran-ng-14.1.0-h69a702a_0
|
| 197 |
+
Linking icu-73.2-h59595ed_0
|
| 198 |
+
Linking zstd-1.5.6-ha6fb4c9_0
|
| 199 |
+
Linking lerc-4.0.0-h27087fc_0
|
| 200 |
+
Linking openh264-2.1.1-h780b84a_0
|
| 201 |
+
Linking gnutls-3.6.13-h85f3911_1
|
| 202 |
+
Linking gmp-6.3.0-hac33072_2
|
| 203 |
+
Linking freetype-2.12.1-h267a509_2
|
| 204 |
+
Linking libxml2-2.12.7-hc051c1a_1
|
| 205 |
+
Linking libtiff-4.5.0-h6adf6a1_2
|
| 206 |
+
Linking mpfr-4.2.1-h9458935_1
|
| 207 |
+
Linking libhwloc-2.10.0-default_h5622ce7_1001
|
| 208 |
+
Linking openjpeg-2.5.0-hfec8fc6_2
|
| 209 |
+
Linking lcms2-2.15-hfd0df8a_0
|
| 210 |
+
Linking mpc-1.3.1-hfe3b2da_0
|
| 211 |
+
Linking tbb-2021.12.0-h297d8ca_1
|
| 212 |
+
Linking mkl-2022.1.0-h84fe81f_915
|
| 213 |
+
Linking mkl-devel-2022.1.0-ha770c72_916
|
| 214 |
+
Linking libblas-3.9.0-16_linux64_mkl
|
| 215 |
+
Linking liblapack-3.9.0-16_linux64_mkl
|
| 216 |
+
Linking libcblas-3.9.0-16_linux64_mkl
|
| 217 |
+
Linking liblapacke-3.9.0-16_linux64_mkl
|
| 218 |
+
Linking blas-devel-3.9.0-16_linux64_mkl
|
| 219 |
+
Linking blas-2.116-mkl
|
| 220 |
+
Linking cuda-version-12.5-3
|
| 221 |
+
Linking tzdata-2024a-h0c530f3_0
|
| 222 |
+
Linking libjpeg-turbo-2.0.0-h9bf148f_0
|
| 223 |
+
warning libmamba [libjpeg-turbo-2.0.0-h9bf148f_0] The following files were already present in the environment:
|
| 224 |
+
- bin/cjpeg
|
| 225 |
+
- bin/djpeg
|
| 226 |
+
- bin/jpegtran
|
| 227 |
+
- bin/rdjpgcom
|
| 228 |
+
- bin/wrjpgcom
|
| 229 |
+
- include/jconfig.h
|
| 230 |
+
- include/jerror.h
|
| 231 |
+
- include/jmorecfg.h
|
| 232 |
+
- include/jpeglib.h
|
| 233 |
+
- lib/libjpeg.a
|
| 234 |
+
- lib/libjpeg.so
|
| 235 |
+
- lib/pkgconfig/libjpeg.pc
|
| 236 |
+
- share/man/man1/cjpeg.1
|
| 237 |
+
- share/man/man1/djpeg.1
|
| 238 |
+
- share/man/man1/jpegtran.1
|
| 239 |
+
- share/man/man1/rdjpgcom.1
|
| 240 |
+
- share/man/man1/wrjpgcom.1
|
| 241 |
+
Linking ffmpeg-4.3-hf484d3e_0
|
| 242 |
+
Linking libcurand-10.3.6.39-0
|
| 243 |
+
Linking libcufile-1.10.0.4-0
|
| 244 |
+
Linking cuda-opencl-12.5.39-0
|
| 245 |
+
Linking cuda-libraries-12.1.0-0
|
| 246 |
+
Linking cuda-runtime-12.1.0-0
|
| 247 |
+
Linking python-3.11.9-hb806964_0_cpython
|
| 248 |
+
Linking pytorch-cuda-12.1-ha16c6d3_5
|
| 249 |
+
Linking wheel-0.43.0-pyhd8ed1ab_1
|
| 250 |
+
Linking setuptools-70.1.1-pyhd8ed1ab_0
|
| 251 |
+
Linking pip-24.0-pyhd8ed1ab_0
|
| 252 |
+
Linking pycparser-2.22-pyhd8ed1ab_0
|
| 253 |
+
Linking six-1.16.0-pyh6c4a22f_0
|
| 254 |
+
Linking hyperframe-6.0.1-pyhd8ed1ab_0
|
| 255 |
+
Linking pytz-2024.1-pyhd8ed1ab_0
|
| 256 |
+
Linking python-tzdata-2024.1-pyhd8ed1ab_0
|
| 257 |
+
Linking charset-normalizer-3.3.2-pyhd8ed1ab_0
|
| 258 |
+
Linking hpack-4.0.0-pyh9f0ad1d_0
|
| 259 |
+
Linking pysocks-1.7.1-pyha2e5f31_6
|
| 260 |
+
Linking idna-3.7-pyhd8ed1ab_0
|
| 261 |
+
Linking certifi-2024.6.2-pyhd8ed1ab_0
|
| 262 |
+
Linking mpmath-1.3.0-pyhd8ed1ab_0
|
| 263 |
+
Linking typing_extensions-4.12.2-pyha770c72_0
|
| 264 |
+
Linking networkx-3.3-pyhd8ed1ab_1
|
| 265 |
+
Linking filelock-3.15.4-pyhd8ed1ab_0
|
| 266 |
+
Linking click-8.1.7-unix_pyh707e725_0
|
| 267 |
+
Linking python-dateutil-2.9.0-pyhd8ed1ab_0
|
| 268 |
+
Linking h2-4.1.0-pyhd8ed1ab_0
|
| 269 |
+
Linking brotli-python-1.1.0-py311hb755f60_1
|
| 270 |
+
Linking markupsafe-2.1.5-py311h459d7ec_0
|
| 271 |
+
Linking gmpy2-2.1.5-py311hc4f1f91_1
|
| 272 |
+
Linking pyyaml-6.0.1-py311h459d7ec_1
|
| 273 |
+
Linking pillow-9.4.0-py311h50def17_1
|
| 274 |
+
Linking numpy-2.0.0-py311h1461c94_0
|
| 275 |
+
Linking cffi-1.16.0-py311hb3a22ac_0
|
| 276 |
+
Linking pandas-2.2.2-py311h14de704_1
|
| 277 |
+
Linking zstandard-0.22.0-py311hb6f056b_1
|
| 278 |
+
Linking jinja2-3.1.4-pyhd8ed1ab_0
|
| 279 |
+
Linking sympy-1.12.1-pypyh2585a3b_103
|
| 280 |
+
Linking urllib3-2.2.2-pyhd8ed1ab_1
|
| 281 |
+
Linking requests-2.32.3-pyhd8ed1ab_0
|
| 282 |
+
Linking pytorch-2.3.1-py3.11_cuda12.1_cudnn8.9.2_0
|
| 283 |
+
Linking torchtriton-2.3.1-py311
|
| 284 |
+
Linking torchvision-0.18.1-py311_cu121
|
| 285 |
+
|
| 286 |
+
Transaction finished
|
| 287 |
+
|
| 288 |
+
To activate this environment, use:
|
| 289 |
+
|
| 290 |
+
mamba activate auto-uvapqvk3mmem
|
| 291 |
+
|
| 292 |
+
Or to execute a single command in this environment, use:
|
| 293 |
+
|
| 294 |
+
mamba run -n auto-uvapqvk3mmem mycommand
|
| 295 |
+
|
| 296 |
+
Installing pip packages
|
| 297 |
+
WARNING: The candidate selected for download or install is a yanked version: 'opencv-python' candidate (version 4.5.5.62 at https://files.pythonhosted.org/packages/9d/98/36bfcbff30da27dd6922ed73ca7802c37d87f77daf4c569da3dcb87b4296/opencv_python-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from https://pypi.org/simple/opencv-python/) (requires-python:>=3.6))
|
| 298 |
+
Reason for being yanked: deprecated, use 4.5.5.64
|
| 299 |
+
Downloading: "https://download.pytorch.org/models/resnet18-5c106cde.pth" to /home/yuqian_fu/.cache/torch/hub/checkpoints/resnet18-5c106cde.pth
|
| 300 |
+
|
| 301 |
0%| | 0.00/44.7M [00:00<?, ?B/s]
|
| 302 |
77%|███████▋ | 34.4M/44.7M [00:00<00:00, 360MB/s]
|
| 303 |
+
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
|
| 304 |
+
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
|
Meta-causal/code-stage1-pipeline/56452.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56454.error
ADDED
|
@@ -0,0 +1,3 @@
slurmstepd: error: *** JOB 56454 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T06:57:02 DUE TO TIME LIMIT ***
slurmstepd: error: *** STEP 56454.0 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T06:57:02 DUE TO TIME LIMIT ***
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.

Meta-causal/code-stage1-pipeline/56454.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56455.error
ADDED
|
@@ -0,0 +1,4 @@
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)

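The warning in 56455.error (and in several of the runs below) comes from calling torch.tensor() on something that is already a tensor at data_loader_joint_v3.py line 426. A minimal sketch of the warning-free pattern the message itself recommends; the shapes, and the assumption that y arrives as a plain Python list, are illustrative only, since the surrounding loader code is not shown in this diff:

```python
import torch

# Hypothetical stand-in for the batch handled at data_loader_joint_v3.py:426.
x = torch.randn(32, 3, 32, 32)   # already a Tensor -> torch.tensor(x) triggers the warning
y = [0, 1, 2, 3]                 # assumed: a plain Python list of labels

# Pattern flagged by the UserWarning:
#   x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)

# Warning-free equivalent suggested by the message:
x = x.clone().detach()                     # explicit copy, detached from autograd
y = torch.as_tensor(y, dtype=torch.long)   # fine for non-tensor inputs such as lists
```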
Meta-causal/code-stage1-pipeline/56455.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56456.error
ADDED
|
@@ -0,0 +1,3 @@
slurmstepd: error: *** JOB 56456 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T07:05:01 DUE TO TIME LIMIT ***
slurmstepd: error: *** STEP 56456.0 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T07:05:01 DUE TO TIME LIMIT ***
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.

Meta-causal/code-stage1-pipeline/56456.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56457.error
ADDED
|
@@ -0,0 +1,4 @@
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)

Meta-causal/code-stage1-pipeline/56457.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56458.error
ADDED
|
@@ -0,0 +1,3 @@
slurmstepd: error: *** JOB 56458 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T07:07:32 DUE TO TIME LIMIT ***
slurmstepd: error: *** STEP 56458.0 ON gcpl4-eu-1 CANCELLED AT 2024-07-04T07:07:32 DUE TO TIME LIMIT ***
srun: Job step aborted: Waiting up to 32 seconds for job step to finish.

Meta-causal/code-stage1-pipeline/56458.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56526.error
ADDED
|
@@ -0,0 +1,31 @@
Traceback (most recent call last):
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_pacs_v13.py", line 86, in <module>
main()
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_pacs_v13.py", line 29, in main
evaluate_pacs(gpu, svroot, source_domain, svpath, factor_num, epoch, stride,eval_mapping, network)
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_pacs_v13.py", line 42, in evaluate_pacs
saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 997, in load
with _open_file_like(f, 'rb') as opened_file:
^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 444, in _open_file_like
return _open_file(name_or_buffer, mode)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 425, in __init__
super().__init__(open(name, mode))
^^^^^^^^^^^^^^^^
FileNotFoundError: [Errno 2] No such file or directory: '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/best_cls_net.pkl'
srun: error: gcpl4-eu-1: task 0: Exited with exit code 1

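Job 56526 (and 56527 below) fail the same way: the evaluation script calls torch.load on best_cls_net.pkl in a run directory where that checkpoint was apparently never written, which suggests the matching training run did not complete (several jobs above were cancelled at their time limit). A minimal defensive sketch around that load; the helper name and the explicit early error are illustrative assumptions, not the actual code in main_test_pacs_v13.py:

```python
import os
import torch

def load_best_checkpoint(svroot, filename="best_cls_net.pkl"):
    """Hypothetical guard around the torch.load call that raised FileNotFoundError above."""
    ckpt_path = os.path.join(svroot, filename)
    if not os.path.isfile(ckpt_path):
        # Fail with the resolved path so a missing or unfinished training run is obvious.
        raise FileNotFoundError(
            f"checkpoint not found: {ckpt_path}; "
            "check that the matching training job finished and saved its best model"
        )
    # map_location='cpu' lets the weights be loaded for inspection without a GPU.
    return torch.load(ckpt_path, map_location="cpu")
```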
Meta-causal/code-stage1-pipeline/56526.log
ADDED
|
@@ -0,0 +1,4 @@
/home/yuqian_fu
{'gpu': '0', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5', 'source_domain': 'art_painting', 'svpath': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/art_painting_16factor_best_test_check.csv', 'factor_num': 16, 'epoch': 'best', 'stride': 5, 'eval_mapping': False, 'network': 'resnet18'}
-------------------------------------loading pretrain weights----------------------------------
loading weight of best

Meta-causal/code-stage1-pipeline/56527.error
ADDED
|
@@ -0,0 +1,31 @@
Traceback (most recent call last):
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_digit_v13.py", line 84, in <module>
main()
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_digit_v13.py", line 28, in main
evaluate_digit(gpu, svroot, svpath, channels, factor_num, stride,epoch, eval_mapping)
File "/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/main_test_digit_v13.py", line 42, in evaluate_digit
saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 997, in load
with _open_file_like(f, 'rb') as opened_file:
^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 444, in _open_file_like
return _open_file(name_or_buffer, mode)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/scratch/yuqian_fu/micromamba/envs/auto-uvapqvk3mmem/lib/python3.11/site-packages/torch/serialization.py", line 425, in __init__
super().__init__(open(name, mode))
^^^^^^^^^^^^^^^^
FileNotFoundError: [Errno 2] No such file or directory: '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/best_cls_net.pkl'
srun: error: gcpl4-eu-1: task 0: Exited with exit code 1

Meta-causal/code-stage1-pipeline/56527.log
ADDED
|
@@ -0,0 +1,3 @@
/home/yuqian_fu
{'gpu': '0', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3', 'svpath': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/14factor_best.csv', 'channels': 3, 'factor_num': 14, 'stride': 3, 'epoch': 'best', 'eval_mapping': True}
loading weight of best

Meta-causal/code-stage1-pipeline/56528.error
ADDED
|
@@ -0,0 +1,3 @@
run_my_joint_v13_test.sh: line 25: ndm: command not found
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)

Meta-causal/code-stage1-pipeline/56528.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56529.error
ADDED
|
@@ -0,0 +1 @@
run_my_joint_test.sh: line 24: randm: command not found

Meta-causal/code-stage1-pipeline/56529.log
ADDED
|
The diff for this file is too large to render.
See raw diff
Meta-causal/code-stage1-pipeline/56540.error
ADDED
|
@@ -0,0 +1,4 @@
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
/home/yuqian_fu/Projects/CausalStyleAdv/Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py:426: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)

Meta-causal/code-stage1-pipeline/56540.log
ADDED
|
@@ -0,0 +1,151 @@
| 1 |
+
/home/yuqian_fu
|
| 2 |
+
{'gpu': '0', 'data': 'art_painting', 'ntr': None, 'translate': None, 'autoaug': 'CA_multiple', 'n': 3, 'stride': 5, 'factor_num': 16, 'epochs': 30, 'nbatch': 100, 'batchsize': 6, 'lr': 0.01, 'lr_scheduler': 'cosine', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5_pipelineAugWoNorm', 'clsadapt': True, 'lambda_causal': 1.0, 'lambda_re': 1.0, 'randm': True, 'randn': True, 'network': 'resnet18'}
|
| 3 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/art_painting_train.hdf5 torch.Size([1840, 3, 227, 227]) torch.Size([1840])
|
| 4 |
+
--------------------------CA_multiple--------------------------
|
| 5 |
+
---------------------------16 factors-----------------
|
| 6 |
+
randm: True
|
| 7 |
+
randn: True
|
| 8 |
+
n: 3
|
| 9 |
+
randm: False
|
| 10 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/art_painting_val.hdf5 torch.Size([208, 3, 227, 227]) torch.Size([208])
|
| 11 |
+
-------------------------------------loading pretrain weights----------------------------------
|
| 12 |
+
306
|
| 13 |
+
0.0001
|
| 14 |
+
changing lr
|
| 15 |
+
---------------------saving model at epoch 0----------------------------------------------------
|
| 16 |
+
epoch 0, time 328.72, cls_loss 8.5176
|
| 17 |
+
306
|
| 18 |
+
9.972609476841367e-05
|
| 19 |
+
changing lr
|
| 20 |
+
---------------------saving model at epoch 1----------------------------------------------------
|
| 21 |
+
epoch 1, time 323.89, cls_loss 4.1852
|
| 22 |
+
306
|
| 23 |
+
9.890738003669029e-05
|
| 24 |
+
changing lr
|
| 25 |
+
---------------------saving model at epoch 2----------------------------------------------------
|
| 26 |
+
epoch 2, time 321.19, cls_loss 1.7210
|
| 27 |
+
306
|
| 28 |
+
9.755282581475769e-05
|
| 29 |
+
changing lr
|
| 30 |
+
---------------------saving model at epoch 3----------------------------------------------------
|
| 31 |
+
epoch 3, time 321.77, cls_loss 0.8687
|
| 32 |
+
306
|
| 33 |
+
9.567727288213003e-05
|
| 34 |
+
changing lr
|
| 35 |
+
epoch 4, time 322.73, cls_loss 0.5508
|
| 36 |
+
306
|
| 37 |
+
9.330127018922194e-05
|
| 38 |
+
changing lr
|
| 39 |
+
epoch 5, time 321.65, cls_loss 0.4191
|
| 40 |
+
306
|
| 41 |
+
9.045084971874738e-05
|
| 42 |
+
changing lr
|
| 43 |
+
epoch 6, time 323.65, cls_loss 0.3904
|
| 44 |
+
306
|
| 45 |
+
8.715724127386972e-05
|
| 46 |
+
changing lr
|
| 47 |
+
---------------------saving model at epoch 7----------------------------------------------------
|
| 48 |
+
epoch 7, time 324.01, cls_loss 0.2998
|
| 49 |
+
306
|
| 50 |
+
8.345653031794292e-05
|
| 51 |
+
changing lr
|
| 52 |
+
epoch 8, time 327.47, cls_loss 0.2084
|
| 53 |
+
306
|
| 54 |
+
7.938926261462366e-05
|
| 55 |
+
changing lr
|
| 56 |
+
epoch 9, time 326.22, cls_loss 0.1815
|
| 57 |
+
306
|
| 58 |
+
7.500000000000001e-05
|
| 59 |
+
changing lr
|
| 60 |
+
epoch 10, time 325.03, cls_loss 0.1476
|
| 61 |
+
306
|
| 62 |
+
7.033683215379003e-05
|
| 63 |
+
changing lr
|
| 64 |
+
epoch 11, time 325.92, cls_loss 0.1094
|
| 65 |
+
306
|
| 66 |
+
6.545084971874738e-05
|
| 67 |
+
changing lr
|
| 68 |
+
---------------------saving model at epoch 12----------------------------------------------------
|
| 69 |
+
epoch 12, time 322.71, cls_loss 0.0653
|
| 70 |
+
306
|
| 71 |
+
6.039558454088797e-05
|
| 72 |
+
changing lr
|
| 73 |
+
epoch 13, time 321.20, cls_loss 0.0639
|
| 74 |
+
306
|
| 75 |
+
5.522642316338269e-05
|
| 76 |
+
changing lr
|
| 77 |
+
---------------------saving model at epoch 14----------------------------------------------------
|
| 78 |
+
epoch 14, time 322.13, cls_loss 0.0464
|
| 79 |
+
306
|
| 80 |
+
5.000000000000002e-05
|
| 81 |
+
changing lr
|
| 82 |
+
epoch 15, time 324.52, cls_loss 0.0402
|
| 83 |
+
306
|
| 84 |
+
4.4773576836617344e-05
|
| 85 |
+
changing lr
|
| 86 |
+
epoch 16, time 322.56, cls_loss 0.0452
|
| 87 |
+
306
|
| 88 |
+
3.9604415459112035e-05
|
| 89 |
+
changing lr
|
| 90 |
+
epoch 17, time 322.39, cls_loss 0.0403
|
| 91 |
+
306
|
| 92 |
+
3.4549150281252636e-05
|
| 93 |
+
changing lr
|
| 94 |
+
epoch 18, time 324.36, cls_loss 0.0190
|
| 95 |
+
306
|
| 96 |
+
2.966316784621e-05
|
| 97 |
+
changing lr
|
| 98 |
+
epoch 19, time 327.79, cls_loss 0.0250
|
| 99 |
+
306
|
| 100 |
+
2.5000000000000015e-05
|
| 101 |
+
changing lr
|
| 102 |
+
epoch 20, time 322.28, cls_loss 0.0416
|
| 103 |
+
306
|
| 104 |
+
2.0610737385376352e-05
|
| 105 |
+
changing lr
|
| 106 |
+
epoch 21, time 322.98, cls_loss 0.0203
|
| 107 |
+
306
|
| 108 |
+
1.654346968205711e-05
|
| 109 |
+
changing lr
|
| 110 |
+
---------------------saving model at epoch 22----------------------------------------------------
|
| 111 |
+
epoch 22, time 325.56, cls_loss 0.0271
|
| 112 |
+
306
|
| 113 |
+
1.2842758726130304e-05
|
| 114 |
+
changing lr
|
| 115 |
+
epoch 23, time 321.58, cls_loss 0.0190
|
| 116 |
+
306
|
| 117 |
+
9.549150281252636e-06
|
| 118 |
+
changing lr
|
| 119 |
+
epoch 24, time 327.21, cls_loss 0.0236
|
| 120 |
+
306
|
| 121 |
+
6.698729810778068e-06
|
| 122 |
+
changing lr
|
| 123 |
+
epoch 25, time 301.36, cls_loss 0.0107
|
| 124 |
+
306
|
| 125 |
+
4.322727117869953e-06
|
| 126 |
+
changing lr
|
| 127 |
+
epoch 26, time 295.79, cls_loss 0.0165
|
| 128 |
+
306
|
| 129 |
+
2.447174185242324e-06
|
| 130 |
+
changing lr
|
| 131 |
+
epoch 27, time 296.53, cls_loss 0.0218
|
| 132 |
+
306
|
| 133 |
+
1.092619963309716e-06
|
| 134 |
+
changing lr
|
| 135 |
+
epoch 28, time 297.90, cls_loss 0.0198
|
| 136 |
+
306
|
| 137 |
+
2.7390523158633003e-07
|
| 138 |
+
changing lr
|
| 139 |
+
epoch 29, time 299.73, cls_loss 0.0094
|
| 140 |
+
---------------------saving last model at epoch 29----------------------------------------------------
|
| 141 |
+
/home/yuqian_fu
|
| 142 |
+
{'gpu': '0', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5_pipelineAugWoNorm', 'source_domain': 'art_painting', 'svpath': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS//art_painting/CA_multiple_16fa_v2_ep30_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5_pipelineAugWoNorm/art_painting_16factor_best_test_check.csv', 'factor_num': 16, 'epoch': 'best', 'stride': 5, 'eval_mapping': False, 'network': 'resnet18'}
|
| 143 |
+
-------------------------------------loading pretrain weights----------------------------------
|
| 144 |
+
loading weight of best
|
| 145 |
+
columns: ['art_painting', 'cartoon', 'photo', 'sketch']
|
| 146 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/art_painting_test.hdf5 torch.Size([2048, 3, 227, 227]) torch.Size([2048])
|
| 147 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/cartoon_test.hdf5 torch.Size([2344, 3, 227, 227]) torch.Size([2344])
|
| 148 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/photo_test.hdf5 torch.Size([1670, 3, 227, 227]) torch.Size([1670])
|
| 149 |
+
/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/sketch_test.hdf5 torch.Size([3929, 3, 227, 227]) torch.Size([3929])
|
| 150 |
+
art_painting cartoon photo sketch Avg
|
| 151 |
+
w/o do (original x) 93.017578 53.412969 88.203593 45.838636 62.485066
|
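
Note on the learning-rate values printed in the PACS log above: the sequence 9.890738e-05, 9.755283e-05, ..., 2.739052e-07 matches a cosine-annealing schedule that decays a base rate of 1e-4 towards 0 over the 30-epoch run, i.e. lr_k = 1e-4 * (1 + cos(pi*k/30)) / 2. The base value and period are inferred from the printed numbers, not taken from the run's configuration string; a minimal Python sketch that reproduces them:

import math

base_lr, t_max = 1e-4, 30            # inferred from the log, not from the config
for k in range(t_max + 1):
    lr = base_lr * (1 + math.cos(math.pi * k / t_max)) / 2
    print(k, lr)                     # k = 2 gives ~9.8907e-05 (first value above), k = 29 gives ~2.739e-07 (last value)
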
Meta-causal/code-stage1-pipeline/56541.error
ADDED
|
File without changes
|
Meta-causal/code-stage1-pipeline/56541.log
ADDED
|
@@ -0,0 +1,432 @@
|
| 1 |
+
/home/yuqian_fu
|
| 2 |
+
{'gpu': '0', 'data': 'mnist', 'ntr': None, 'translate': None, 'autoaug': 'CA_multiple', 'n': 3, 'stride': 3, 'factor_num': 14, 'epochs': 100, 'nbatch': 100, 'batchsize': 32, 'lr': 0.0001, 'lr_scheduler': 'Step', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3_pipelineAugWoNorm', 'clsadapt': True, 'lambda_causal': 1.0, 'lambda_re': 1.0, 'randm': True, 'randn': True, 'network': 'resnet18'}
|
| 3 |
+
--------------------------CA_multiple--------------------------
|
| 4 |
+
---------------------------14 factors-----------------
|
| 5 |
+
randm: True
|
| 6 |
+
randn: True
|
| 7 |
+
n: 3
|
| 8 |
+
randm: False
|
| 9 |
+
100
|
| 10 |
+
0.0001
|
| 11 |
+
changing lr
|
| 12 |
+
---------------------saving model at epoch 0----------------------------------------------------
|
| 13 |
+
epoch 0, time 183.25, cls_loss 2.1515
|
| 14 |
+
100
|
| 15 |
+
0.0001
|
| 16 |
+
changing lr
|
| 17 |
+
---------------------saving model at epoch 1----------------------------------------------------
|
| 18 |
+
epoch 1, time 183.70, cls_loss 1.7865
|
| 19 |
+
100
|
| 20 |
+
0.0001
|
| 21 |
+
changing lr
|
| 22 |
+
epoch 2, time 183.59, cls_loss 1.5733
|
| 23 |
+
100
|
| 24 |
+
0.0001
|
| 25 |
+
changing lr
|
| 26 |
+
---------------------saving model at epoch 3----------------------------------------------------
|
| 27 |
+
epoch 3, time 183.31, cls_loss 1.4407
|
| 28 |
+
100
|
| 29 |
+
0.0001
|
| 30 |
+
changing lr
|
| 31 |
+
---------------------saving model at epoch 4----------------------------------------------------
|
| 32 |
+
epoch 4, time 183.01, cls_loss 1.3369
|
| 33 |
+
100
|
| 34 |
+
0.0001
|
| 35 |
+
changing lr
|
| 36 |
+
---------------------saving model at epoch 5----------------------------------------------------
|
| 37 |
+
epoch 5, time 183.85, cls_loss 1.3080
|
| 38 |
+
100
|
| 39 |
+
0.0001
|
| 40 |
+
changing lr
|
| 41 |
+
epoch 6, time 182.43, cls_loss 1.2082
|
| 42 |
+
100
|
| 43 |
+
0.0001
|
| 44 |
+
changing lr
|
| 45 |
+
---------------------saving model at epoch 7----------------------------------------------------
|
| 46 |
+
epoch 7, time 182.60, cls_loss 1.1517
|
| 47 |
+
100
|
| 48 |
+
0.0001
|
| 49 |
+
changing lr
|
| 50 |
+
---------------------saving model at epoch 8----------------------------------------------------
|
| 51 |
+
epoch 8, time 183.05, cls_loss 1.0938
|
| 52 |
+
100
|
| 53 |
+
0.0001
|
| 54 |
+
changing lr
|
| 55 |
+
---------------------saving model at epoch 9----------------------------------------------------
|
| 56 |
+
epoch 9, time 183.04, cls_loss 1.0485
|
| 57 |
+
100
|
| 58 |
+
0.0001
|
| 59 |
+
changing lr
|
| 60 |
+
epoch 10, time 182.31, cls_loss 1.0636
|
| 61 |
+
100
|
| 62 |
+
0.0001
|
| 63 |
+
changing lr
|
| 64 |
+
epoch 11, time 182.08, cls_loss 0.9913
|
| 65 |
+
100
|
| 66 |
+
0.0001
|
| 67 |
+
changing lr
|
| 68 |
+
epoch 12, time 182.44, cls_loss 0.9240
|
| 69 |
+
100
|
| 70 |
+
0.0001
|
| 71 |
+
changing lr
|
| 72 |
+
---------------------saving model at epoch 13----------------------------------------------------
|
| 73 |
+
epoch 13, time 182.56, cls_loss 0.8962
|
| 74 |
+
100
|
| 75 |
+
0.0001
|
| 76 |
+
changing lr
|
| 77 |
+
epoch 14, time 182.83, cls_loss 0.8474
|
| 78 |
+
100
|
| 79 |
+
0.0001
|
| 80 |
+
changing lr
|
| 81 |
+
epoch 15, time 182.24, cls_loss 0.8730
|
| 82 |
+
100
|
| 83 |
+
0.0001
|
| 84 |
+
changing lr
|
| 85 |
+
---------------------saving model at epoch 16----------------------------------------------------
|
| 86 |
+
epoch 16, time 182.40, cls_loss 0.8184
|
| 87 |
+
100
|
| 88 |
+
0.0001
|
| 89 |
+
changing lr
|
| 90 |
+
epoch 17, time 182.12, cls_loss 0.8083
|
| 91 |
+
100
|
| 92 |
+
0.0001
|
| 93 |
+
changing lr
|
| 94 |
+
epoch 18, time 182.02, cls_loss 0.7381
|
| 95 |
+
100
|
| 96 |
+
0.0001
|
| 97 |
+
changing lr
|
| 98 |
+
epoch 19, time 182.19, cls_loss 0.7326
|
| 99 |
+
100
|
| 100 |
+
0.0001
|
| 101 |
+
changing lr
|
| 102 |
+
epoch 20, time 181.69, cls_loss 0.6649
|
| 103 |
+
100
|
| 104 |
+
0.0001
|
| 105 |
+
changing lr
|
| 106 |
+
epoch 21, time 181.62, cls_loss 0.6849
|
| 107 |
+
100
|
| 108 |
+
0.0001
|
| 109 |
+
changing lr
|
| 110 |
+
epoch 22, time 181.68, cls_loss 0.6675
|
| 111 |
+
100
|
| 112 |
+
0.0001
|
| 113 |
+
changing lr
|
| 114 |
+
---------------------saving model at epoch 23----------------------------------------------------
|
| 115 |
+
epoch 23, time 182.29, cls_loss 0.6101
|
| 116 |
+
100
|
| 117 |
+
0.0001
|
| 118 |
+
changing lr
|
| 119 |
+
epoch 24, time 182.13, cls_loss 0.6237
|
| 120 |
+
100
|
| 121 |
+
0.0001
|
| 122 |
+
changing lr
|
| 123 |
+
epoch 25, time 182.23, cls_loss 0.6229
|
| 124 |
+
100
|
| 125 |
+
0.0001
|
| 126 |
+
changing lr
|
| 127 |
+
epoch 26, time 182.24, cls_loss 0.5664
|
| 128 |
+
100
|
| 129 |
+
0.0001
|
| 130 |
+
changing lr
|
| 131 |
+
epoch 27, time 182.13, cls_loss 0.5588
|
| 132 |
+
100
|
| 133 |
+
0.0001
|
| 134 |
+
changing lr
|
| 135 |
+
epoch 28, time 182.14, cls_loss 0.5539
|
| 136 |
+
100
|
| 137 |
+
0.0001
|
| 138 |
+
changing lr
|
| 139 |
+
epoch 29, time 182.35, cls_loss 0.5198
|
| 140 |
+
100
|
| 141 |
+
0.0001
|
| 142 |
+
changing lr
|
| 143 |
+
epoch 30, time 182.22, cls_loss 0.5153
|
| 144 |
+
100
|
| 145 |
+
0.0001
|
| 146 |
+
changing lr
|
| 147 |
+
epoch 31, time 182.36, cls_loss 0.4764
|
| 148 |
+
100
|
| 149 |
+
0.0001
|
| 150 |
+
changing lr
|
| 151 |
+
epoch 32, time 182.13, cls_loss 0.4748
|
| 152 |
+
100
|
| 153 |
+
0.0001
|
| 154 |
+
changing lr
|
| 155 |
+
epoch 33, time 181.83, cls_loss 0.4448
|
| 156 |
+
100
|
| 157 |
+
0.0001
|
| 158 |
+
changing lr
|
| 159 |
+
epoch 34, time 182.32, cls_loss 0.4358
|
| 160 |
+
100
|
| 161 |
+
0.0001
|
| 162 |
+
changing lr
|
| 163 |
+
epoch 35, time 181.92, cls_loss 0.4201
|
| 164 |
+
100
|
| 165 |
+
0.0001
|
| 166 |
+
changing lr
|
| 167 |
+
epoch 36, time 181.91, cls_loss 0.3949
|
| 168 |
+
100
|
| 169 |
+
0.0001
|
| 170 |
+
changing lr
|
| 171 |
+
epoch 37, time 182.01, cls_loss 0.3818
|
| 172 |
+
100
|
| 173 |
+
0.0001
|
| 174 |
+
changing lr
|
| 175 |
+
---------------------saving model at epoch 38----------------------------------------------------
|
| 176 |
+
epoch 38, time 182.02, cls_loss 0.3651
|
| 177 |
+
100
|
| 178 |
+
0.0001
|
| 179 |
+
changing lr
|
| 180 |
+
epoch 39, time 182.07, cls_loss 0.3656
|
| 181 |
+
100
|
| 182 |
+
0.0001
|
| 183 |
+
changing lr
|
| 184 |
+
epoch 40, time 181.87, cls_loss 0.3864
|
| 185 |
+
100
|
| 186 |
+
0.0001
|
| 187 |
+
changing lr
|
| 188 |
+
epoch 41, time 182.33, cls_loss 0.3647
|
| 189 |
+
100
|
| 190 |
+
0.0001
|
| 191 |
+
changing lr
|
| 192 |
+
epoch 42, time 182.58, cls_loss 0.3301
|
| 193 |
+
100
|
| 194 |
+
0.0001
|
| 195 |
+
changing lr
|
| 196 |
+
---------------------saving model at epoch 43----------------------------------------------------
|
| 197 |
+
epoch 43, time 182.56, cls_loss 0.3279
|
| 198 |
+
100
|
| 199 |
+
0.0001
|
| 200 |
+
changing lr
|
| 201 |
+
epoch 44, time 185.15, cls_loss 0.3470
|
| 202 |
+
100
|
| 203 |
+
0.0001
|
| 204 |
+
changing lr
|
| 205 |
+
epoch 45, time 182.28, cls_loss 0.2938
|
| 206 |
+
100
|
| 207 |
+
0.0001
|
| 208 |
+
changing lr
|
| 209 |
+
epoch 46, time 182.03, cls_loss 0.2920
|
| 210 |
+
100
|
| 211 |
+
0.0001
|
| 212 |
+
changing lr
|
| 213 |
+
epoch 47, time 182.53, cls_loss 0.2780
|
| 214 |
+
100
|
| 215 |
+
0.0001
|
| 216 |
+
changing lr
|
| 217 |
+
epoch 48, time 182.87, cls_loss 0.2592
|
| 218 |
+
100
|
| 219 |
+
0.0001
|
| 220 |
+
changing lr
|
| 221 |
+
epoch 49, time 182.61, cls_loss 0.2725
|
| 222 |
+
100
|
| 223 |
+
0.0001
|
| 224 |
+
changing lr
|
| 225 |
+
epoch 50, time 182.34, cls_loss 0.2344
|
| 226 |
+
100
|
| 227 |
+
0.0001
|
| 228 |
+
changing lr
|
| 229 |
+
epoch 51, time 182.13, cls_loss 0.2686
|
| 230 |
+
100
|
| 231 |
+
0.0001
|
| 232 |
+
changing lr
|
| 233 |
+
epoch 52, time 183.03, cls_loss 0.2475
|
| 234 |
+
100
|
| 235 |
+
0.0001
|
| 236 |
+
changing lr
|
| 237 |
+
epoch 53, time 182.25, cls_loss 0.2359
|
| 238 |
+
100
|
| 239 |
+
0.0001
|
| 240 |
+
changing lr
|
| 241 |
+
epoch 54, time 182.39, cls_loss 0.2279
|
| 242 |
+
100
|
| 243 |
+
0.0001
|
| 244 |
+
changing lr
|
| 245 |
+
epoch 55, time 182.38, cls_loss 0.2340
|
| 246 |
+
100
|
| 247 |
+
0.0001
|
| 248 |
+
changing lr
|
| 249 |
+
epoch 56, time 182.19, cls_loss 0.2217
|
| 250 |
+
100
|
| 251 |
+
0.0001
|
| 252 |
+
changing lr
|
| 253 |
+
epoch 57, time 182.01, cls_loss 0.2188
|
| 254 |
+
100
|
| 255 |
+
0.0001
|
| 256 |
+
changing lr
|
| 257 |
+
epoch 58, time 182.23, cls_loss 0.2269
|
| 258 |
+
100
|
| 259 |
+
0.0001
|
| 260 |
+
changing lr
|
| 261 |
+
epoch 59, time 182.47, cls_loss 0.2212
|
| 262 |
+
100
|
| 263 |
+
0.0001
|
| 264 |
+
changing lr
|
| 265 |
+
epoch 60, time 182.34, cls_loss 0.1887
|
| 266 |
+
100
|
| 267 |
+
0.0001
|
| 268 |
+
changing lr
|
| 269 |
+
epoch 61, time 182.11, cls_loss 0.1859
|
| 270 |
+
100
|
| 271 |
+
0.0001
|
| 272 |
+
changing lr
|
| 273 |
+
epoch 62, time 182.40, cls_loss 0.2021
|
| 274 |
+
100
|
| 275 |
+
0.0001
|
| 276 |
+
changing lr
|
| 277 |
+
epoch 63, time 182.09, cls_loss 0.1756
|
| 278 |
+
100
|
| 279 |
+
0.0001
|
| 280 |
+
changing lr
|
| 281 |
+
epoch 64, time 182.38, cls_loss 0.1737
|
| 282 |
+
100
|
| 283 |
+
0.0001
|
| 284 |
+
changing lr
|
| 285 |
+
epoch 65, time 182.21, cls_loss 0.1648
|
| 286 |
+
100
|
| 287 |
+
0.0001
|
| 288 |
+
changing lr
|
| 289 |
+
epoch 66, time 182.02, cls_loss 0.1613
|
| 290 |
+
100
|
| 291 |
+
0.0001
|
| 292 |
+
changing lr
|
| 293 |
+
epoch 67, time 182.29, cls_loss 0.1569
|
| 294 |
+
100
|
| 295 |
+
0.0001
|
| 296 |
+
changing lr
|
| 297 |
+
epoch 68, time 182.29, cls_loss 0.1487
|
| 298 |
+
100
|
| 299 |
+
0.0001
|
| 300 |
+
changing lr
|
| 301 |
+
---------------------saving model at epoch 69----------------------------------------------------
|
| 302 |
+
epoch 69, time 182.61, cls_loss 0.1538
|
| 303 |
+
100
|
| 304 |
+
0.0001
|
| 305 |
+
changing lr
|
| 306 |
+
epoch 70, time 182.28, cls_loss 0.1653
|
| 307 |
+
100
|
| 308 |
+
0.0001
|
| 309 |
+
changing lr
|
| 310 |
+
epoch 71, time 181.94, cls_loss 0.1639
|
| 311 |
+
100
|
| 312 |
+
0.0001
|
| 313 |
+
changing lr
|
| 314 |
+
epoch 72, time 181.84, cls_loss 0.1784
|
| 315 |
+
100
|
| 316 |
+
0.0001
|
| 317 |
+
changing lr
|
| 318 |
+
epoch 73, time 181.70, cls_loss 0.1843
|
| 319 |
+
100
|
| 320 |
+
0.0001
|
| 321 |
+
changing lr
|
| 322 |
+
epoch 74, time 180.53, cls_loss 0.1832
|
| 323 |
+
100
|
| 324 |
+
0.0001
|
| 325 |
+
changing lr
|
| 326 |
+
epoch 75, time 180.51, cls_loss 0.1421
|
| 327 |
+
100
|
| 328 |
+
0.0001
|
| 329 |
+
changing lr
|
| 330 |
+
epoch 76, time 180.07, cls_loss 0.1224
|
| 331 |
+
100
|
| 332 |
+
0.0001
|
| 333 |
+
changing lr
|
| 334 |
+
epoch 77, time 180.21, cls_loss 0.1187
|
| 335 |
+
100
|
| 336 |
+
0.0001
|
| 337 |
+
changing lr
|
| 338 |
+
epoch 78, time 180.07, cls_loss 0.1058
|
| 339 |
+
100
|
| 340 |
+
0.0001
|
| 341 |
+
changing lr
|
| 342 |
+
epoch 79, time 180.76, cls_loss 0.1301
|
| 343 |
+
100
|
| 344 |
+
1e-05
|
| 345 |
+
changing lr
|
| 346 |
+
---------------------saving model at epoch 80----------------------------------------------------
|
| 347 |
+
epoch 80, time 181.07, cls_loss 0.0915
|
| 348 |
+
100
|
| 349 |
+
1e-05
|
| 350 |
+
changing lr
|
| 351 |
+
epoch 81, time 180.00, cls_loss 0.0845
|
| 352 |
+
100
|
| 353 |
+
1e-05
|
| 354 |
+
changing lr
|
| 355 |
+
epoch 82, time 180.09, cls_loss 0.0767
|
| 356 |
+
100
|
| 357 |
+
1e-05
|
| 358 |
+
changing lr
|
| 359 |
+
epoch 83, time 180.14, cls_loss 0.0711
|
| 360 |
+
100
|
| 361 |
+
1e-05
|
| 362 |
+
changing lr
|
| 363 |
+
epoch 84, time 180.25, cls_loss 0.0698
|
| 364 |
+
100
|
| 365 |
+
1e-05
|
| 366 |
+
changing lr
|
| 367 |
+
epoch 85, time 180.12, cls_loss 0.0682
|
| 368 |
+
100
|
| 369 |
+
1e-05
|
| 370 |
+
changing lr
|
| 371 |
+
epoch 86, time 179.91, cls_loss 0.0590
|
| 372 |
+
100
|
| 373 |
+
1e-05
|
| 374 |
+
changing lr
|
| 375 |
+
epoch 87, time 179.84, cls_loss 0.0607
|
| 376 |
+
100
|
| 377 |
+
1e-05
|
| 378 |
+
changing lr
|
| 379 |
+
epoch 88, time 179.82, cls_loss 0.0634
|
| 380 |
+
100
|
| 381 |
+
1e-05
|
| 382 |
+
changing lr
|
| 383 |
+
epoch 89, time 180.04, cls_loss 0.0718
|
| 384 |
+
100
|
| 385 |
+
1e-05
|
| 386 |
+
changing lr
|
| 387 |
+
epoch 90, time 179.62, cls_loss 0.0704
|
| 388 |
+
100
|
| 389 |
+
1e-05
|
| 390 |
+
changing lr
|
| 391 |
+
epoch 91, time 179.77, cls_loss 0.0669
|
| 392 |
+
100
|
| 393 |
+
1e-05
|
| 394 |
+
changing lr
|
| 395 |
+
epoch 92, time 179.87, cls_loss 0.0574
|
| 396 |
+
100
|
| 397 |
+
1e-05
|
| 398 |
+
changing lr
|
| 399 |
+
epoch 93, time 179.66, cls_loss 0.0556
|
| 400 |
+
100
|
| 401 |
+
1e-05
|
| 402 |
+
changing lr
|
| 403 |
+
epoch 94, time 179.87, cls_loss 0.0631
|
| 404 |
+
100
|
| 405 |
+
1e-05
|
| 406 |
+
changing lr
|
| 407 |
+
epoch 95, time 179.67, cls_loss 0.0525
|
| 408 |
+
100
|
| 409 |
+
1e-05
|
| 410 |
+
changing lr
|
| 411 |
+
epoch 96, time 179.69, cls_loss 0.0473
|
| 412 |
+
100
|
| 413 |
+
1e-05
|
| 414 |
+
changing lr
|
| 415 |
+
epoch 97, time 179.39, cls_loss 0.0470
|
| 416 |
+
100
|
| 417 |
+
1e-05
|
| 418 |
+
changing lr
|
| 419 |
+
epoch 98, time 179.75, cls_loss 0.0529
|
| 420 |
+
100
|
| 421 |
+
1e-05
|
| 422 |
+
changing lr
|
| 423 |
+
epoch 99, time 180.06, cls_loss 0.0541
|
| 424 |
+
---------------------saving last model at epoch 99----------------------------------------------------
|
| 425 |
+
/home/yuqian_fu
|
| 426 |
+
{'gpu': '0', 'svroot': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3_pipelineAugWoNorm', 'svpath': '/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit/CA_multiple_14fa_all_ep100_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3_pipelineAugWoNorm/14factor_best.csv', 'channels': 3, 'factor_num': 14, 'stride': 3, 'epoch': 'best', 'eval_mapping': True}
|
| 427 |
+
loading weight of best
|
| 428 |
+
Using downloaded and verified file: /home/yuqian_fu/.pytorch/SVHN/test_32x32.mat
|
| 429 |
+
mnist svhn ... usps Avg
|
| 430 |
+
w/o do (original x) 93.89 13.579441 ... 89.436971 40.16719
|
| 431 |
+
|
| 432 |
+
[1 rows x 6 columns]
|
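
For the digit run logged above, the printed learning rate stays at 1e-4 up to epoch 79 and drops to 1e-5 from epoch 80 of 100, which is consistent with a single step decay at 80% of training (the "Step0.8" tag in the run name). A minimal sketch of such a schedule; the dummy module, the optimizer choice, and the gamma of 0.1 are stand-ins inferred from the printed values, not taken from the repository's training script:

import torch

params = torch.nn.Linear(8, 8).parameters()                  # dummy parameters
opt = torch.optim.Adam(params, lr=1e-4)
sched = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[80], gamma=0.1)

for epoch in range(100):
    # one epoch of training would run here with opt.param_groups[0]['lr']
    opt.step()                                               # placeholder optimization step
    sched.step()

print(opt.param_groups[0]['lr'])                             # ~1e-05 after the drop at epoch 80
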
Meta-causal/code-stage1-pipeline/AllEpochs_test_digit_v13.py
ADDED
|
@@ -0,0 +1,101 @@
|
| 1 |
+
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from torch.utils.data import DataLoader
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import numpy as np
|
| 9 |
+
import click
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
from network import mnist_net_my as mnist_net
|
| 13 |
+
from network import adaptor_v2
|
| 14 |
+
from tools import causalaugment_v3 as causalaugment
|
| 15 |
+
from main_my_joint_v13_auto import evaluate
|
| 16 |
+
import data_loader_joint_v3 as data_loader
|
| 17 |
+
|
| 18 |
+
@click.command()
|
| 19 |
+
@click.option('--gpu', type=str, default='0', help='which GPU to use')
|
| 20 |
+
@click.option('--svroot', type=str, default='./saved')
|
| 21 |
+
@click.option('--svpath', type=str, default=None, help='path to save the result log')
|
| 22 |
+
@click.option('--channels', type=int, default=3)
|
| 23 |
+
@click.option('--factor_num', type=int, default=16)
|
| 24 |
+
@click.option('--stride', type=int, default=16)
|
| 25 |
+
@click.option('--epoch', type=str, default='best')
|
| 26 |
+
@click.option('--eval_mapping', type=bool, default=True, help='whether to inspect how well the mapping is learned')
|
| 27 |
+
def main(gpu, svroot, svpath, channels, factor_num,stride, epoch, eval_mapping):
|
| 28 |
+
evaluate_digit(gpu, svroot, svpath, channels, factor_num, stride,epoch, eval_mapping)
|
| 29 |
+
|
| 30 |
+
def evaluate_digit(gpu, svroot, svpath, channels=3, factor_num=16,stride=5,epoch='best', eval_mapping=True):
|
| 31 |
+
settings = locals().copy()
|
| 32 |
+
print(settings)
|
| 33 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
|
| 34 |
+
|
| 35 |
+
# load the classification model
|
| 36 |
+
if channels == 3:
|
| 37 |
+
cls_net = mnist_net.ConvNet().cuda()
|
| 38 |
+
elif channels == 1:
|
| 39 |
+
cls_net = mnist_net.ConvNet(imdim=channels).cuda()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
epoch_list = []
|
| 43 |
+
file_list = os.listdir(svroot)
|
| 44 |
+
for file in file_list:
|
| 45 |
+
if('.pkl' in file):
|
| 46 |
+
epoch_list.append(file)
|
| 47 |
+
print('epoch_list:', epoch_list)
|
| 48 |
+
|
| 49 |
+
'''
|
| 50 |
+
if epoch == 'best':
|
| 51 |
+
print("loading weight of %s"%(epoch))
|
| 52 |
+
saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
|
| 53 |
+
elif epoch == 'last':
|
| 54 |
+
print("loading weight of %s"%(epoch))
|
| 55 |
+
saved_weight = torch.load(os.path.join(svroot, 'last_cls_net.pkl'))
|
| 56 |
+
'''
|
| 57 |
+
|
| 58 |
+
for epoch_file in epoch_list:
|
| 59 |
+
print("loading weight of %s"%(epoch_file))
|
| 60 |
+
saved_weight = torch.load(os.path.join(svroot, epoch_file))
|
| 61 |
+
|
| 62 |
+
cls_net.load_state_dict(saved_weight)
|
| 63 |
+
cls_net.eval()
|
| 64 |
+
|
| 65 |
+
# test
|
| 66 |
+
str2fun = {
|
| 67 |
+
'mnist': data_loader.load_mnist,
|
| 68 |
+
'mnist_m': data_loader.load_mnist_m,
|
| 69 |
+
'usps': data_loader.load_usps,
|
| 70 |
+
'svhn': data_loader.load_svhn,
|
| 71 |
+
'syndigit': data_loader.load_syndigit,
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
columns = ['mnist', 'svhn', 'mnist_m', 'syndigit','usps']
|
| 75 |
+
target = ['svhn', 'mnist_m', 'syndigit','usps']
|
| 76 |
+
|
| 77 |
+
index = ['w/o do (original x)']
|
| 78 |
+
data_result = {}
|
| 79 |
+
|
| 80 |
+
for idx, data in enumerate(columns):
|
| 81 |
+
teset = str2fun[data]('test', channels=channels)
|
| 82 |
+
teloader = DataLoader(teset, batch_size=8, num_workers=0)
|
| 83 |
+
# compute the evaluation metric
|
| 84 |
+
teacc = evaluate(cls_net, teloader)
|
| 85 |
+
if data == 'mnist':
|
| 86 |
+
acc_avg = np.zeros(teacc.shape)
|
| 87 |
+
else:
|
| 88 |
+
acc_avg = acc_avg + teacc
|
| 89 |
+
data_result[data] = teacc
|
| 90 |
+
acc_avg = acc_avg/float(len(target))
|
| 91 |
+
|
| 92 |
+
data_result['Avg'] = acc_avg
|
| 93 |
+
|
| 94 |
+
df = pd.DataFrame(data_result,index = index)
|
| 95 |
+
print(df)
|
| 96 |
+
if svpath is not None:
|
| 97 |
+
df.to_csv(svpath)
|
| 98 |
+
|
| 99 |
+
if __name__=='__main__':
|
| 100 |
+
main()
|
| 101 |
+
|
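
A hypothetical invocation of the checkpoint sweep defined above; the svroot and svpath values below are illustrative placeholders (not paths from this repository), and the hard-coded dataset paths inside data_loader_joint_v3 still have to exist for the call to run:

from AllEpochs_test_digit_v13 import evaluate_digit

# Evaluates every *.pkl checkpoint found in svroot on the digit benchmarks
# (mnist, svhn, mnist_m, syndigit, usps) and writes the per-domain accuracy
# table to svpath (overwritten for each checkpoint in the loop).
evaluate_digit(
    gpu='0',
    svroot='./saved-digit/my_run',                    # placeholder checkpoint directory
    svpath='./saved-digit/my_run/14factor_all.csv',   # placeholder output CSV
    channels=3,
    factor_num=14,
    stride=3,
    epoch='best',        # kept for interface compatibility; the sweep itself ignores it
    eval_mapping=True,
)
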
Meta-causal/code-stage1-pipeline/AllEpochs_test_pacs_v13.py
ADDED
|
@@ -0,0 +1,103 @@
|
| 1 |
+
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from torch.utils.data import DataLoader
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import numpy as np
|
| 9 |
+
import click
|
| 10 |
+
import pandas as pd
|
| 11 |
+
|
| 12 |
+
from network import resnet as resnet
|
| 13 |
+
from network import adaptor_v2
|
| 14 |
+
from tools import causalaugment_v3 as causalaugment
|
| 15 |
+
from main_my_joint_v13_auto import evaluate
|
| 16 |
+
import data_loader_joint_v3 as data_loader
|
| 17 |
+
|
| 18 |
+
@click.command()
|
| 19 |
+
@click.option('--gpu', type=str, default='0', help='which GPU to use')
|
| 20 |
+
@click.option('--svroot', type=str, default='./saved')
|
| 21 |
+
@click.option('--source_domain', type=str, default='art_painting', help='source domain')
|
| 22 |
+
@click.option('--svpath', type=str, default=None, help='path to save the result log')
|
| 23 |
+
@click.option('--factor_num', type=int, default=16)
|
| 24 |
+
@click.option('--epoch', type=str, default='best')
|
| 25 |
+
@click.option('--stride', type=int, default=5)
|
| 26 |
+
@click.option('--eval_mapping', type=bool, default=False, help='whether to inspect how well the mapping is learned')
|
| 27 |
+
@click.option('--network', type=str, default='resnet18', help='backbone network')
|
| 28 |
+
def main(gpu, svroot, source_domain, svpath, factor_num, epoch, stride,eval_mapping, network):
|
| 29 |
+
evaluate_pacs(gpu, svroot, source_domain, svpath, factor_num, epoch, stride,eval_mapping, network)
|
| 30 |
+
|
| 31 |
+
def evaluate_pacs(gpu, svroot, source_domain, svpath, factor_num=16, epoch='best', stride=5,eval_mapping=False, network='resnet18'):
|
| 32 |
+
settings = locals().copy()
|
| 33 |
+
print(settings)
|
| 34 |
+
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
|
| 35 |
+
|
| 36 |
+
# load the classification model
|
| 37 |
+
if network == 'resnet18':
|
| 38 |
+
cls_net = resnet.resnet18(classes=7,c_dim=2048).cuda()
|
| 39 |
+
input_dim = 2048
|
| 40 |
+
|
| 41 |
+
epoch_list = []
|
| 42 |
+
file_list = os.listdir(svroot)
|
| 43 |
+
for file in file_list:
|
| 44 |
+
if('.pkl' in file):
|
| 45 |
+
epoch_list.append(file)
|
| 46 |
+
print('epoch_list:', epoch_list)
|
| 47 |
+
|
| 48 |
+
'''
|
| 49 |
+
if epoch == 'best':
|
| 50 |
+
print("loading weight of %s"%(epoch))
|
| 51 |
+
saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
|
| 52 |
+
elif epoch == 'last':
|
| 53 |
+
print("loading weight of %s"%(epoch))
|
| 54 |
+
saved_weight = torch.load(os.path.join(svroot, 'last_cls_net.pkl'))
|
| 55 |
+
'''
|
| 56 |
+
|
| 57 |
+
for epoch_file in epoch_list:
|
| 58 |
+
print("loading weight of %s"%(epoch_file))
|
| 59 |
+
saved_weight = torch.load(os.path.join(svroot, epoch_file))
|
| 60 |
+
|
| 61 |
+
cls_net.load_state_dict(saved_weight)
|
| 62 |
+
cls_net.eval()
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
columns = ['art_painting', 'cartoon', 'photo', 'sketch']
|
| 66 |
+
target = [i for i in columns if i!=source_domain]
|
| 67 |
+
columns = [source_domain] + target
|
| 68 |
+
print("columns:",columns)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
index = ['w/o do (original x)']
|
| 72 |
+
|
| 73 |
+
data_result = {}
|
| 74 |
+
data_result_ours = {}
|
| 75 |
+
|
| 76 |
+
for idx, data in enumerate(columns):
|
| 77 |
+
teset = data_loader.load_pacs(data, 'test')
|
| 78 |
+
teloader = DataLoader(teset, batch_size=4, num_workers=0)
|
| 79 |
+
# compute the evaluation metric
|
| 80 |
+
acc = evaluate(cls_net, teloader)
|
| 81 |
+
data_result_ours[data] = acc
|
| 82 |
+
|
| 83 |
+
teacc = evaluate(cls_net, teloader)
|
| 84 |
+
if data == source_domain:
|
| 85 |
+
acc_avg = np.zeros(teacc.shape)
|
| 86 |
+
else:
|
| 87 |
+
acc_avg = acc_avg + teacc
|
| 88 |
+
data_result[data] = teacc
|
| 89 |
+
acc_avg = acc_avg/float(len(target))
|
| 90 |
+
|
| 91 |
+
data_result['Avg'] = acc_avg
|
| 92 |
+
|
| 93 |
+
df = pd.DataFrame(data_result,index = index)
|
| 94 |
+
print(df)
|
| 95 |
+
|
| 96 |
+
if svpath is not None:
|
| 97 |
+
df.to_csv(svpath)
|
| 98 |
+
|
| 99 |
+
if __name__=='__main__':
|
| 100 |
+
main()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
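
As a small sanity check (not part of the repository), the 'Avg' column that evaluate_pacs reports is averaged over the three target domains only, excluding the source domain; with the per-domain numbers from the art_painting log printed earlier this reproduces the logged 62.485066:

import numpy as np

# Per-domain accuracies copied from the log above; art_painting is the source domain.
acc = {'art_painting': 93.017578, 'cartoon': 53.412969,
       'photo': 88.203593, 'sketch': 45.838636}
source = 'art_painting'
target = [d for d in acc if d != source]
avg = np.mean([acc[d] for d in target])
print(round(avg, 6))    # 62.485066, matching the Avg column in the log
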
Meta-causal/code-stage1-pipeline/data_loader_joint_v3.py
ADDED
|
@@ -0,0 +1,861 @@
|
| 1 |
+
''' Digit experiments
|
| 2 |
+
'''
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from torch.utils.data import Dataset, TensorDataset
|
| 6 |
+
from torchvision import transforms
|
| 7 |
+
from torchvision.datasets import MNIST, SVHN, CIFAR10, STL10, USPS
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
import pickle
|
| 11 |
+
import numpy as np
|
| 12 |
+
import h5py
|
| 13 |
+
#import cv2
|
| 14 |
+
from scipy.io import loadmat
|
| 15 |
+
from PIL import Image
|
| 16 |
+
|
| 17 |
+
from tools.autoaugment import SVHNPolicy, CIFAR10Policy
|
| 18 |
+
from tools.randaugment import RandAugment
|
| 19 |
+
from tools.causalaugment_v3 import RandAugment_incausal, FactualAugment_incausal, CounterfactualAugment_incausal, MultiCounterfactualAugment_incausal
|
| 20 |
+
|
| 21 |
+
from PIL import ImageEnhance
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
transformtypedict=dict(Brightness=ImageEnhance.Brightness, Contrast=ImageEnhance.Contrast, Sharpness=ImageEnhance.Sharpness, Color=ImageEnhance.Color)
|
| 25 |
+
|
| 26 |
+
class ImageJitterforX(object):
|
| 27 |
+
'''
|
| 28 |
+
from StyleAdv dataaug
|
| 29 |
+
'''
|
| 30 |
+
def __init__(self, transformdict):
|
| 31 |
+
self.transforms = [(transformtypedict[k], transformdict[k]) for k in transformdict]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def __call__(self, img):
|
| 35 |
+
out = img
|
| 36 |
+
randtensor = torch.rand(len(self.transforms))
|
| 37 |
+
|
| 38 |
+
for i, (transformer, alpha) in enumerate(self.transforms):
|
| 39 |
+
r = alpha*(randtensor[i]*2.0 -1.0) + 1
|
| 40 |
+
out = transformer(out).enhance(r).convert('RGB')
|
| 41 |
+
|
| 42 |
+
return out
|
| 43 |
+
|
| 44 |
+
class TransformLoaderforX:
|
| 45 |
+
'''
|
| 46 |
+
from StyleAdv dataaug
|
| 47 |
+
'''
|
| 48 |
+
def __init__(self, image_size,
|
| 49 |
+
normalize_param = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
| 50 |
+
jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)):
|
| 51 |
+
self.image_size = image_size
|
| 52 |
+
self.normalize_param = normalize_param
|
| 53 |
+
self.jitter_param = jitter_param
|
| 54 |
+
|
| 55 |
+
def parse_transform(self, transform_type):
|
| 56 |
+
if transform_type=='ImageJitter':
|
| 57 |
+
method = ImageJitterforX( self.jitter_param )
|
| 58 |
+
return method
|
| 59 |
+
method = getattr(transforms, transform_type)
|
| 60 |
+
|
| 61 |
+
if transform_type=='RandomResizedCrop':
|
| 62 |
+
return method(self.image_size)
|
| 63 |
+
elif transform_type=='CenterCrop':
|
| 64 |
+
return method(self.image_size)
|
| 65 |
+
elif transform_type=='Resize':
|
| 66 |
+
return method([int(self.image_size*1.15), int(self.image_size*1.15)])
|
| 67 |
+
elif transform_type=='Normalize':
|
| 68 |
+
return method(**self.normalize_param )
|
| 69 |
+
else:
|
| 70 |
+
return method()
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def get_composed_transform(self, aug = False):
|
| 74 |
+
if aug:
|
| 75 |
+
#transform_list = ['RandomResizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor', 'Normalize']
|
| 76 |
+
transform_list = ['RandomResizedCrop', 'ImageJitter', 'RandomHorizontalFlip', 'ToTensor']
|
| 77 |
+
else:
|
| 78 |
+
#transform_list = ['Resize','CenterCrop', 'ToTensor', 'Normalize']
|
| 79 |
+
#transform_list = ['ToTensor', 'Normalize']
|
| 80 |
+
transform_list = ['ToTensor']
|
| 81 |
+
|
| 82 |
+
tranform0 = [transforms.ToPILImage()]
|
| 83 |
+
transform_funcs = [ self.parse_transform(x) for x in transform_list]
|
| 84 |
+
tranform_all = tranform0 + transform_funcs
|
| 85 |
+
transform = transforms.Compose(tranform_all)
|
| 86 |
+
return transform
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class myTensorDataset(Dataset):
|
| 90 |
+
def __init__(self, x, y, transform=None, transform2=None, transform3=None, twox=False):
|
| 91 |
+
self.x = x
|
| 92 |
+
self.y = y
|
| 93 |
+
self.transform = transform
|
| 94 |
+
self.transform2 = transform2
|
| 95 |
+
self.transform3 = transform3
|
| 96 |
+
self.twox = twox
|
| 97 |
+
def __len__(self):
|
| 98 |
+
return len(self.x)
|
| 99 |
+
|
| 100 |
+
def __getitem__(self, index):
|
| 101 |
+
x = self.x[index]
|
| 102 |
+
y = self.y[index]
|
| 103 |
+
c, h, w =x.shape
|
| 104 |
+
if self.transform is not None:
|
| 105 |
+
x_RA = self.transform(x)
|
| 106 |
+
# print("x_RA.shape:",x_RA.shape)
|
| 107 |
+
if self.transform3 is not None:
|
| 108 |
+
x_CA = self.transform3(x_RA)
|
| 109 |
+
x_CA = x_CA.reshape(-1,c,h,w)
|
| 110 |
+
# print("x_CA.shape:",x_CA.shape)
|
| 111 |
+
if self.transform2 is not None:
|
| 112 |
+
x_FA = self.transform2(x)
|
| 113 |
+
# x_FA = x_FA.view(c,13,h,w)
|
| 114 |
+
x_FA = x_FA.reshape(-1,c,h,w)
|
| 115 |
+
# print("x_FA_in getitem.shape:",x_FA.shape)
|
| 116 |
+
# print("x_FA.shape:",x_FA.shape)
|
| 117 |
+
|
| 118 |
+
return (x, x_RA, x_FA, x_CA), y
|
| 119 |
+
else:
|
| 120 |
+
return (x, x_RA, x_CA), y
|
| 121 |
+
else:
|
| 122 |
+
if self.transform2 is not None:
|
| 123 |
+
x_FA = self.transform2(x)
|
| 124 |
+
x_FA = x_FA.reshape(-1,c,h,w)
|
| 125 |
+
return (x, x_RA, x_FA), y
|
| 126 |
+
else:
|
| 127 |
+
if self.twox:
|
| 128 |
+
return (x, x_RA), y
|
| 129 |
+
else:
|
| 130 |
+
x_RA = self.transform(x)
|
| 131 |
+
return x_RA, y
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
HOME = os.environ['HOME']
|
| 135 |
+
print(HOME)
|
| 136 |
+
def resize_imgs(x, size):
|
| 137 |
+
''' Currently this only handles single-channel images
|
| 138 |
+
x [n, 28, 28]
|
| 139 |
+
size int
|
| 140 |
+
'''
|
| 141 |
+
resize_x = np.zeros([x.shape[0], size, size])
|
| 142 |
+
for i, im in enumerate(x):
|
| 143 |
+
im = Image.fromarray(im)
|
| 144 |
+
im = im.resize([size, size], Image.ANTIALIAS)
|
| 145 |
+
resize_x[i] = np.asarray(im)
|
| 146 |
+
return resize_x
|
| 147 |
+
|
| 148 |
+
def load_mnist(split='train', translate=None, twox=False, ntr=None, autoaug=None, factor_num=16, randm=False,randn=False,channels=3,n=3,stride=5):
|
| 149 |
+
'''
|
| 150 |
+
autoaug == 'AA', AutoAugment
|
| 151 |
+
'FastAA', Fast AutoAugment
|
| 152 |
+
'RA', RandAugment
|
| 153 |
+
channels == 3 returns 3-channel RGB images by default
|
| 154 |
+
1 returns single-channel images
|
| 155 |
+
'''
|
| 156 |
+
#path = f'data/mnist-{split}.pkl'
|
| 157 |
+
path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/minst-{split}.pkl'
|
| 158 |
+
if not os.path.exists(path):
|
| 159 |
+
dataset = MNIST(f'{HOME}/.pytorch/MNIST', train=(split=='train'), download=True)
|
| 160 |
+
x, y = dataset.data, dataset.targets
|
| 161 |
+
if split=='train':
|
| 162 |
+
x, y = x[0:10000], y[0:10000]
|
| 163 |
+
x = torch.tensor(resize_imgs(x.numpy(), 32))
|
| 164 |
+
x = (x.float()/255.).unsqueeze(1).repeat(1,3,1,1)
|
| 165 |
+
with open(path, 'wb') as f:
|
| 166 |
+
pickle.dump([x, y], f)
|
| 167 |
+
with open(path, 'rb') as f:
|
| 168 |
+
# print("reading!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
| 169 |
+
x, y = pickle.load(f)
|
| 170 |
+
if channels == 1:
|
| 171 |
+
x = x[:,0:1,:,:]
|
| 172 |
+
|
| 173 |
+
if ntr is not None:
|
| 174 |
+
x, y = x[0:ntr], y[0:ntr]
|
| 175 |
+
|
| 176 |
+
# if no data augmentation is applied
|
| 177 |
+
if (translate is None) and (autoaug is None):
|
| 178 |
+
dataset = TensorDataset(x, y)
|
| 179 |
+
return dataset
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
#fuyuqian: add styleadv-style aug
|
| 183 |
+
transform_x_train = TransformLoaderforX((x.shape[-2], x.shape[-1])).get_composed_transform(aug=True)
|
| 184 |
+
transform_x_test = TransformLoaderforX((x.shape[-2], x.shape[-1])).get_composed_transform(aug=False)
|
| 185 |
+
if(split == 'train'):
|
| 186 |
+
transformed_images = []
|
| 187 |
+
for img in x:
|
| 188 |
+
img = transform_x_train(img) # Apply transform to each image
|
| 189 |
+
transformed_images.append(img)
|
| 190 |
+
x = torch.stack(transformed_images)
|
| 191 |
+
#print('x_aug train here', x.shape)
|
| 192 |
+
else:
|
| 193 |
+
transformed_images = []
|
| 194 |
+
for img in x:
|
| 195 |
+
img = transform_x_test(img) # Apply transform to each image
|
| 196 |
+
transformed_images.append(img)
|
| 197 |
+
x = torch.stack(transformed_images)
|
| 198 |
+
#print('x_aug test here', x.shape)
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
transform = [transforms.ToPILImage()]
|
| 203 |
+
transform_single_factor = [transforms.ToPILImage()]
|
| 204 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 205 |
+
transform_CA = [transforms.ToPILImage()]
|
| 206 |
+
if translate is not None:
|
| 207 |
+
transform.append(transforms.RandomAffine(0, [translate, translate]))
|
| 208 |
+
transform_single_factor.append(transforms.RandomAffine(0, [translate, translate]))
|
| 209 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 210 |
+
transform_CA.append(transforms.RandomAffine(0, [translate, translate]))
|
| 211 |
+
if autoaug is not None:
|
| 212 |
+
if autoaug == 'CA':
|
| 213 |
+
print("--------------------------CA--------------------------")
|
| 214 |
+
print("n:",n)
|
| 215 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 216 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 217 |
+
transform_CA.append(CounterfactualAugment_incausal(factor_num))
|
| 218 |
+
elif autoaug == 'CA_multiple':
|
| 219 |
+
print("--------------------------CA_multiple--------------------------")
|
| 220 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 221 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 222 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 223 |
+
elif autoaug == 'Ours_A':
|
| 224 |
+
print("--------------------------Ours_Augment--------------------------")
|
| 225 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 226 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 227 |
+
|
| 228 |
+
transform.append(transforms.ToTensor())
|
| 229 |
+
transform = transforms.Compose(transform)
|
| 230 |
+
transform_single_factor.append(transforms.ToTensor())
|
| 231 |
+
transform_single_factor = transforms.Compose(transform_single_factor)
|
| 232 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 233 |
+
transform_CA.append(transforms.ToTensor())
|
| 234 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 235 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, transform3=transform_CA,twox=twox)
|
| 236 |
+
else:
|
| 237 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, twox=twox)
|
| 238 |
+
# print(x.shape)
|
| 239 |
+
# print(y.shape)
|
| 240 |
+
return dataset
|
| 241 |
+
|
| 242 |
+
def load_cifar10(split='train', translate=None, twox=False, autoaug=None, factor_num=16, randm=False,randn=False,channels=3,n=3,stride=5):
|
| 243 |
+
dataset = CIFAR10(f'{HOME}/.pytorch/CIFAR10', train=(split=='train'), download=True)
|
| 244 |
+
x, y = dataset.data, dataset.targets
|
| 245 |
+
x = x.transpose(0,3,1,2)
|
| 246 |
+
|
| 247 |
+
x, y = torch.tensor(x), torch.tensor(y)
|
| 248 |
+
x = x.float()/255.
|
| 249 |
+
print(x.shape,y.shape)
|
| 250 |
+
if (translate is None) and (autoaug is None):
|
| 251 |
+
dataset = TensorDataset(x, y)
|
| 252 |
+
return dataset
|
| 253 |
+
#x.transpose(0,3,1,2)
|
| 254 |
+
|
| 255 |
+
# data augmentation pipeline
|
| 256 |
+
transform = [transforms.ToPILImage()]
|
| 257 |
+
transform_single_factor = [transforms.ToPILImage()]
|
| 258 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 259 |
+
transform_CA = [transforms.ToPILImage()]
|
| 260 |
+
if translate is not None:
|
| 261 |
+
transform.append(transforms.RandomAffine(0, [translate, translate]))
|
| 262 |
+
transform_single_factor.append(transforms.RandomAffine(0, [translate, translate]))
|
| 263 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 264 |
+
transform_CA.append(transforms.RandomAffine(0, [translate, translate]))
|
| 265 |
+
if autoaug is not None:
|
| 266 |
+
if autoaug == 'CA':
|
| 267 |
+
print("--------------------------CA--------------------------")
|
| 268 |
+
print("n:",n)
|
| 269 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 270 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 271 |
+
transform_CA.append(CounterfactualAugment_incausal(factor_num))
|
| 272 |
+
elif autoaug == 'CA_multiple':
|
| 273 |
+
print("--------------------------CA_multiple--------------------------")
|
| 274 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 275 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 276 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 277 |
+
elif autoaug == 'Ours_A':
|
| 278 |
+
print("--------------------------Ours_Augment--------------------------")
|
| 279 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 280 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 281 |
+
|
| 282 |
+
transform.append(transforms.ToTensor())
|
| 283 |
+
transform = transforms.Compose(transform)
|
| 284 |
+
transform_single_factor.append(transforms.ToTensor())
|
| 285 |
+
transform_single_factor = transforms.Compose(transform_single_factor)
|
| 286 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 287 |
+
transform_CA.append(transforms.ToTensor())
|
| 288 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 289 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, transform3=transform_CA,twox=twox)
|
| 290 |
+
else:
|
| 291 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, twox=twox)
|
| 292 |
+
# print(x.shape)
|
| 293 |
+
# print(y.shape)
|
| 294 |
+
return dataset
|
| 295 |
+
def load_IMG(task='S-U', translate=None, twox=False, autoaug=None, factor_num=16, randm=False,randn=False,channels=3,n=3,stride=5):
|
| 296 |
+
# path = f'data/img2vid/{domain}/stanford40_12.npz'
|
| 297 |
+
if task == 'S-U':
|
| 298 |
+
path = f'data/img2vid/{task}/stanford40_12.npz'
|
| 299 |
+
elif task == 'E-H':
|
| 300 |
+
path = f'data/img2vid/{task}/EAD50_13.npz'
|
| 301 |
+
print(path)
|
| 302 |
+
dataset = np.load(path)
|
| 303 |
+
x, y = dataset['x'], dataset['y']
|
| 304 |
+
b, g, r = np.split(x,3,axis=-1)
|
| 305 |
+
x = np.concatenate((r,g,b),axis=-1)
|
| 306 |
+
x = x.transpose(0,3,1,2)
|
| 307 |
+
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
|
| 308 |
+
x = x.float()/255.
|
| 309 |
+
print(path,x.shape,y.shape)
|
| 310 |
+
# for i in range(20):
|
| 311 |
+
# img_temp = transforms.ToPILImage()(x[i])
|
| 312 |
+
# img_temp.save('data/PACS/debug_images/img_pil_'+domain+'_'+split+'_'+str(i)+'.png')
|
| 313 |
+
if (translate is None) and (autoaug is None):
|
| 314 |
+
dataset = TensorDataset(x, y)
|
| 315 |
+
return dataset
|
| 316 |
+
|
| 317 |
+
#x.transpose(0,3,1,2)
|
| 318 |
+
|
| 319 |
+
# data augmentation pipeline
|
| 320 |
+
transform = [transforms.ToPILImage()]
|
| 321 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 322 |
+
transform_single_factor = [transforms.ToPILImage()]
|
| 323 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple' or autoaug == 'CA_multiple_noSingle':
|
| 324 |
+
transform_CA = [transforms.ToPILImage()]
|
| 325 |
+
if translate is not None:
|
| 326 |
+
transform.append(transforms.RandomAffine(0, [translate, translate]))
|
| 327 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 328 |
+
transform_single_factor.append(transforms.RandomAffine(0, [translate, translate]))
|
| 329 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple' or autoaug == 'CA_multiple_noSingle':
|
| 330 |
+
transform_CA.append(transforms.RandomAffine(0, [translate, translate]))
|
| 331 |
+
if autoaug is not None:
|
| 332 |
+
if autoaug == 'CA':
|
| 333 |
+
print("--------------------------CA--------------------------")
|
| 334 |
+
print("n:",n)
|
| 335 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 336 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 337 |
+
transform_CA.append(CounterfactualAugment_incausal(factor_num))
|
| 338 |
+
elif autoaug == 'CA_multiple':
|
| 339 |
+
print("--------------------------CA_multiple--------------------------")
|
| 340 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 341 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 342 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 343 |
+
elif autoaug == 'CA_multiple_noSingle':
|
| 344 |
+
print("--------------------------CA_multiple_noSingle--------------------------")
|
| 345 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 346 |
+
# transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 347 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 348 |
+
elif autoaug == 'Ours_A':
|
| 349 |
+
print("--------------------------Ours_Augment--------------------------")
|
| 350 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 351 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 352 |
+
|
| 353 |
+
transform.append(transforms.ToTensor())
|
| 354 |
+
transform = transforms.Compose(transform)
|
| 355 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 356 |
+
transform_single_factor.append(transforms.ToTensor())
|
| 357 |
+
transform_single_factor = transforms.Compose(transform_single_factor)
|
| 358 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 359 |
+
transform_CA.append(transforms.ToTensor())
|
| 360 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 361 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, transform3=transform_CA,twox=twox)
|
| 362 |
+
elif autoaug == 'CA_multiple_noSingle':
|
| 363 |
+
transform_CA.append(transforms.ToTensor())
|
| 364 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 365 |
+
dataset = myTensorDataset(x, y, transform=transform, transform3=transform_CA,twox=twox)
|
| 366 |
+
else:
|
| 367 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, twox=twox)
|
| 368 |
+
# print(x.shape)
|
| 369 |
+
# print(y.shape)
|
| 370 |
+
return dataset
|
| 371 |
+
|
| 372 |
+
def load_VID(task='S-U',split='1'):
|
| 373 |
+
if task == 'S-U':
|
| 374 |
+
path = f'data/img2vid/{task}/ucf101_12_frame_sample8_{split}.npz'
|
| 375 |
+
elif task == 'E-H':
|
| 376 |
+
path = f'data/img2vid/{task}/hmdb51_13_frame_sample8_{split}.npz'
|
| 377 |
+
dataset = np.load(path)
|
| 378 |
+
print(path)
|
| 379 |
+
x, y = dataset['x'], dataset['y']
|
| 380 |
+
b, g, r = np.split(x,3,axis=-1)
|
| 381 |
+
x = np.concatenate((r,g,b),axis=-1)
|
| 382 |
+
x = x.transpose(0,3,1,2)
|
| 383 |
+
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
|
| 384 |
+
x = x.float()/255.
|
| 385 |
+
print(path,x.shape,y.shape)
|
| 386 |
+
# for i in range(20):
|
| 387 |
+
# img_temp = transforms.ToPILImage()(x[i])
|
| 388 |
+
# img_temp.save('data/PACS/debug_images/img_pil_'+domain+'_'+split+'_'+str(i)+'.png')
|
| 389 |
+
dataset = TensorDataset(x, y)
|
| 390 |
+
return dataset
|
| 391 |
+
|
| 392 |
+
def load_pacs(domain='photo', split='train', translate=None, twox=False, autoaug=None, factor_num=16, randm=False,randn=False,channels=3,n=3,stride=5):
|
| 393 |
+
#path = f'data/PACS/{domain}_{split}.hdf5'
|
| 394 |
+
path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/{domain}_{split}.hdf5'
|
| 395 |
+
dataset = h5py.File(path, 'r')
|
| 396 |
+
x, y = dataset['images'], dataset['labels']
|
| 397 |
+
#for i in range(20):
|
| 398 |
+
# cv2.imwrite('data/PACS/debug_images/img_cv2_'+domain+'_'+split+'_'+str(i)+'.png', x[i])
|
| 399 |
+
b, g, r = np.split(x,3,axis=-1)
|
| 400 |
+
x = np.concatenate((r,g,b),axis=-1)
|
| 401 |
+
|
| 402 |
+
#x = x.transpose(0,3,1,2)
|
| 403 |
+
# Convert image data to uint8
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
#fuyuqian: add styleadv-style aug
|
| 407 |
+
x = x.astype(np.uint8)
|
| 408 |
+
transform_x_train = TransformLoaderforX((x.shape[-3], x.shape[-2])).get_composed_transform(aug=True)
|
| 409 |
+
transform_x_test = TransformLoaderforX((x.shape[-3], x.shape[-2])).get_composed_transform(aug=False)
|
| 410 |
+
if(split == 'train'):
|
| 411 |
+
transformed_images = []
|
| 412 |
+
for img in x:
|
| 413 |
+
img = transform_x_train(img) # Apply transform to each image
|
| 414 |
+
transformed_images.append(img)
|
| 415 |
+
x = torch.stack(transformed_images)
|
| 416 |
+
#print('x_aug train here', x.shape)
|
| 417 |
+
else:
|
| 418 |
+
transformed_images = []
|
| 419 |
+
for img in x:
|
| 420 |
+
img = transform_x_test(img) # Apply transform to each image
|
| 421 |
+
transformed_images.append(img)
|
| 422 |
+
x = torch.stack(transformed_images)
|
| 423 |
+
#print('x_aug test here', x.shape)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
x, y = torch.tensor(x), torch.tensor(y, dtype=torch.long)
|
| 427 |
+
|
| 428 |
+
y = y - 1
|
| 429 |
+
x = x.float()/255.
|
| 430 |
+
print(path,x.shape,y.shape)
|
| 431 |
+
# for i in range(20):
|
| 432 |
+
# img_temp = transforms.ToPILImage()(x[i])
|
| 433 |
+
# img_temp.save('data/PACS/debug_images/img_pil_'+domain+'_'+split+'_'+str(i)+'.png')
|
| 434 |
+
if (translate is None) and (autoaug is None):
|
| 435 |
+
dataset = TensorDataset(x, y)
|
| 436 |
+
return dataset
|
| 437 |
+
#x.transpose(0,3,1,2)
|
| 438 |
+
|
| 439 |
+
# data augmentation pipeline
|
| 440 |
+
transform = [transforms.ToPILImage()]
|
| 441 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 442 |
+
transform_single_factor = [transforms.ToPILImage()]
|
| 443 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple' or autoaug == 'CA_multiple_noSingle':
|
| 444 |
+
transform_CA = [transforms.ToPILImage()]
|
| 445 |
+
if translate is not None:
|
| 446 |
+
transform.append(transforms.RandomAffine(0, [translate, translate]))
|
| 447 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 448 |
+
transform_single_factor.append(transforms.RandomAffine(0, [translate, translate]))
|
| 449 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple' or autoaug == 'CA_multiple_noSingle':
|
| 450 |
+
transform_CA.append(transforms.RandomAffine(0, [translate, translate]))
|
| 451 |
+
if autoaug is not None:
|
| 452 |
+
if autoaug == 'CA':
|
| 453 |
+
print("--------------------------CA--------------------------")
|
| 454 |
+
print("n:",n)
|
| 455 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 456 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 457 |
+
transform_CA.append(CounterfactualAugment_incausal(factor_num))
|
| 458 |
+
elif autoaug == 'CA_multiple':
|
| 459 |
+
print("--------------------------CA_multiple--------------------------")
|
| 460 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 461 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 462 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 463 |
+
elif autoaug == 'CA_multiple_noSingle':
|
| 464 |
+
print("--------------------------CA_multiple_noSingle--------------------------")
|
| 465 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 466 |
+
# transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 467 |
+
transform_CA.append(MultiCounterfactualAugment_incausal(factor_num, stride))
|
| 468 |
+
elif autoaug == 'Ours_A':
|
| 469 |
+
print("--------------------------Ours_Augment--------------------------")
|
| 470 |
+
transform.append(RandAugment_incausal(n,4,factor_num, randm=randm,randn=randn))
|
| 471 |
+
transform_single_factor.append(FactualAugment_incausal(4, factor_num, randm=False))
|
| 472 |
+
|
| 473 |
+
transform.append(transforms.ToTensor())
|
| 474 |
+
transform = transforms.Compose(transform)
|
| 475 |
+
if autoaug != 'CA_multiple_noSingle':
|
| 476 |
+
transform_single_factor.append(transforms.ToTensor())
|
| 477 |
+
transform_single_factor = transforms.Compose(transform_single_factor)
|
| 478 |
+
if autoaug == 'CA' or autoaug == 'CA_multiple':
|
| 479 |
+
transform_CA.append(transforms.ToTensor())
|
| 480 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 481 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, transform3=transform_CA,twox=twox)
|
| 482 |
+
elif autoaug == 'CA_multiple_noSingle':
|
| 483 |
+
transform_CA.append(transforms.ToTensor())
|
| 484 |
+
transform_CA = transforms.Compose(transform_CA)
|
| 485 |
+
dataset = myTensorDataset(x, y, transform=transform, transform3=transform_CA,twox=twox)
|
| 486 |
+
else:
|
| 487 |
+
dataset = myTensorDataset(x, y, transform=transform, transform2=transform_single_factor, twox=twox)
|
| 488 |
+
# print(x.shape)
|
| 489 |
+
# print(y.shape)
|
| 490 |
+
return dataset
|
| 491 |
+
|
| 492 |
+
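# --- illustrative only -------------------------------------------------------
# A minimal sketch (not from the original upload) of how load_pacs is consumed
# downstream.  It assumes, as the training script main_my_joint_v13_auto.py
# does, that with twox=True and autoaug='CA_multiple' each batch unpacks into
# the original view plus the RandAugment / factual / counterfactual views.
# The domain and batch size below are arbitrary choices.
def _demo_load_pacs_batches():
    from torch.utils.data import DataLoader
    trset = load_pacs(domain='art_painting', split='train', twox=True,
                      autoaug='CA_multiple', factor_num=16, randm=True, randn=True, n=3, stride=5)
    loader = DataLoader(trset, batch_size=6, shuffle=True, drop_last=True)
    x_four, y = next(iter(loader))
    x, x_RA, x_FA, x_CA = x_four[0], x_four[1], x_four[2], x_four[3]
    print(x.shape, x_RA.shape, x_FA.shape, x_CA.shape, y.shape)
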
def read_dataset(domain, split):
    path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/PACS/{domain}_{split}.hdf5'
    dataset = h5py.File(path, 'r')
    x_temp, y_temp = dataset['images'], dataset['labels']
    b, g, r = np.split(x_temp, 3, axis=-1)
    x_temp = np.concatenate((r, g, b), axis=-1)
    x_temp = x_temp.transpose(0, 3, 1, 2)
    x_temp, y_temp = torch.tensor(x_temp), torch.tensor(y_temp, dtype=torch.long)
    y_temp = y_temp - 1
    x_temp = x_temp.float() / 255.
    return x_temp, y_temp

def load_pacs_multi(target_domain='photo', split='train', translate=None, twox=False, autoaug=None,
                    factor_num=16, randm=False, randn=False, channels=3, n=3, stride=5):
    # target_domain is expected to be a single domain name (string); the three
    # remaining PACS domains are concatenated as the multi-source training set.
    domains = ['art_painting', 'cartoon', 'photo', 'sketch']
    source_domain = [i for i in domains if i != target_domain]
    for i in range(len(source_domain)):
        x_temp, y_temp = read_dataset(source_domain[i], split=split)
        print(x_temp.shape, y_temp.shape)
        if i == 0:
            x = x_temp.clone()
            y = y_temp.clone()
        else:
            x = torch.cat([x, x_temp], 0)
            y = torch.cat([y, y_temp], 0)
    print(x.shape, y.shape)
    # the augmentation pipeline is identical to load_pacs, so reuse the shared helper
    return _build_ca_dataset(x, y, translate, autoaug, factor_num, randm, randn, n, stride, twox)

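# --- illustrative only -------------------------------------------------------
# A minimal leave-one-domain-out sketch (an assumption about intended usage,
# not code from the original repo): train on three PACS domains and hold out
# the fourth as the unseen target.
def _demo_load_pacs_multi():
    held_out = 'sketch'
    trset = load_pacs_multi(target_domain=held_out, split='train',
                            autoaug='CA_multiple', twox=True)
    teset = load_pacs(domain=held_out, split='test')
    print(len(trset), len(teset))
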
def _load_cifar10_c_level(dataroot, level):
    """Each CIFAR-10-C corruption file stores 50000 images: 5 severity levels x
    10000 images; `level` selects one 10000-image slice per corruption."""
    path = f'data/cifar10_c_level{level}.pkl'
    if not os.path.exists(path):
        print(f"generating cifar10_c_level{level}")
        labels = np.load(os.path.join(dataroot, 'labels.npy'))
        y_single = labels[0:10000]
        x = torch.zeros((190000, 3, 32, 32))
        for j in range(19):
            if j == 0:
                y = y_single
            else:
                y = np.hstack((y, y_single))
        index = 0
        for filename in os.listdir(dataroot):
            if filename == 'labels.npy':
                continue
            imgs = np.load(os.path.join(dataroot, filename))
            imgs = imgs.transpose(0, 3, 1, 2)
            imgs = torch.tensor(imgs)
            imgs = imgs.float() / 255.
            print(imgs.shape)
            x[index*10000:(index+1)*10000] = imgs[(level-1)*10000:level*10000]
            index = index + 1
        y = torch.tensor(y)
        with open(path, 'wb') as f:
            pickle.dump([x, y], f)
    else:
        print(f"reading cifar10_c_level{level}")
        with open(path, 'rb') as f:
            x, y = pickle.load(f)
    dataset = TensorDataset(x, y)
    return dataset


def load_cifar10_c_level1(dataroot):
    return _load_cifar10_c_level(dataroot, 1)

def load_cifar10_c_level2(dataroot):
    return _load_cifar10_c_level(dataroot, 2)

def load_cifar10_c_level3(dataroot):
    return _load_cifar10_c_level(dataroot, 3)

def load_cifar10_c_level4(dataroot):
    return _load_cifar10_c_level(dataroot, 4)

def load_cifar10_c_level5(dataroot):
    return _load_cifar10_c_level(dataroot, 5)

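# --- illustrative only -------------------------------------------------------
# Sketch of sweeping the five severity-level loaders during evaluation.  The
# dataroot is a placeholder; it should point at the extracted CIFAR-10-C .npy
# files (labels.npy plus one file per corruption).
def _demo_cifar10_c_levels(dataroot='data/CIFAR-10-C'):
    from torch.utils.data import DataLoader
    level_fns = (load_cifar10_c_level1, load_cifar10_c_level2, load_cifar10_c_level3,
                 load_cifar10_c_level4, load_cifar10_c_level5)
    loaders = {}
    for level, fn in enumerate(level_fns, start=1):
        loaders[level] = DataLoader(fn(dataroot), batch_size=128, shuffle=False)
    return loaders
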
def load_cifar10_c(dataroot):
    y = np.load(os.path.join(dataroot, 'labels.npy'))
    print("y.shape:", y.shape)
    y_single = y[0:10000]
    x1 = torch.zeros((190000, 3, 32, 32))
    x2 = torch.zeros((190000, 3, 32, 32))
    x3 = torch.zeros((190000, 3, 32, 32))
    x4 = torch.zeros((190000, 3, 32, 32))
    x5 = torch.zeros((190000, 3, 32, 32))
    for j in range(19):
        if j == 0:
            y_total = y_single
        else:
            y_total = np.hstack((y_total, y_single))
    print("y_total.shape:", y_total.shape)
    index = 0
    for filename in os.listdir(dataroot):
        if filename == 'labels.npy':
            continue
        x = np.load(os.path.join(dataroot, filename))
        x = x.transpose(0, 3, 1, 2)
        x = torch.tensor(x)
        x = x.float() / 255.
        print(x.shape)
        x1[index*10000:(index+1)*10000] = x[0:10000]
        x2[index*10000:(index+1)*10000] = x[10000:20000]
        x3[index*10000:(index+1)*10000] = x[20000:30000]
        x4[index*10000:(index+1)*10000] = x[30000:40000]
        x5[index*10000:(index+1)*10000] = x[40000:50000]
        index = index + 1
    # x1, x2, x3, x4, x5, y_total = torch.tensor(x1), torch.tensor(x2), torch.tensor(x3),\
    #     torch.tensor(x4), torch.tensor(x5), torch.tensor(y_total)
    y_total = torch.tensor(y_total)
    dataset1 = TensorDataset(x1, y_total)
    dataset2 = TensorDataset(x2, y_total)
    dataset3 = TensorDataset(x3, y_total)
    dataset4 = TensorDataset(x4, y_total)
    dataset5 = TensorDataset(x5, y_total)
    return dataset1, dataset2, dataset3, dataset4, dataset5


def load_cifar10_c_class(dataroot, CORRUPTIONS):
    y = np.load(os.path.join(dataroot, 'labels.npy'))
    y_single = y[0:10000]
    y_single = torch.tensor(y_single)
    print("y.shape:", y.shape)
    x = np.load(os.path.join(dataroot, CORRUPTIONS + '.npy'))
    print("loading data of", os.path.join(dataroot, CORRUPTIONS + '.npy'))
    x = x.transpose(0, 3, 1, 2)
    x = torch.tensor(x)
    x = x.float() / 255.
    dataset = []
    for i in range(5):
        x_single = x[i*10000:(i+1)*10000]
        dataset.append(TensorDataset(x_single, y_single))
    return dataset

def load_usps(split='train', channels=3):
    path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/usps-{split}.pkl'
    if not os.path.exists(path):
        dataset = USPS(f'{HOME}/.pytorch/USPS', train=(split == 'train'), download=True)
        x, y = dataset.data, dataset.targets
        x = torch.tensor(resize_imgs(x, 32))
        x = (x.float()/255.).unsqueeze(1).repeat(1, 3, 1, 1)
        y = torch.tensor(y)
        with open(path, 'wb') as f:
            pickle.dump([x, y], f)
    with open(path, 'rb') as f:
        x, y = pickle.load(f)
    if channels == 1:
        x = x[:, 0:1, :, :]
    dataset = TensorDataset(x, y)
    return dataset


def load_svhn(split='train', channels=3):
    dataset = SVHN(f'{HOME}/.pytorch/SVHN', split=split, download=True)
    x, y = dataset.data, dataset.labels
    x = x.astype('float32')/255.
    x, y = torch.tensor(x), torch.tensor(y)
    if channels == 1:
        x = x.mean(1, keepdim=True)
    dataset = TensorDataset(x, y)
    return dataset


def load_syndigit(split='train', channels=3):
    path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/synth_{split}_32x32.mat'
    data = loadmat(path)
    x, y = data['X'], data['y']
    x = np.transpose(x, [3, 2, 0, 1]).astype('float32')/255.
    y = y.squeeze()
    x, y = torch.tensor(x), torch.tensor(y)
    if channels == 1:
        x = x.mean(1, keepdim=True)
    dataset = TensorDataset(x, y)
    return dataset


def load_mnist_m(split='train', channels=3):
    path = f'/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/data/mnist_m-{split}.pkl'
    with open(path, 'rb') as f:
        x, y = pickle.load(f)
    x, y = torch.tensor(x.astype('float32')/255.), torch.tensor(y)
    if channels == 1:
        x = x.mean(1, keepdim=True)
    dataset = TensorDataset(x, y)
    return dataset


if __name__ == '__main__':
    dataset = load_mnist(split='train')
    print('mnist train', len(dataset))
    dataset = load_mnist('test')
    print('mnist test', len(dataset))
    dataset = load_mnist_m('test')
    print('mnist_m test', len(dataset))
    dataset = load_svhn(split='test')
    print('svhn', len(dataset))
    dataset = load_usps(split='test')
    print('usps', len(dataset))
    dataset = load_syndigit(split='test')
    print('syndigit', len(dataset))

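# --- illustrative only -------------------------------------------------------
# Sketch of gathering all digit test sets in one dict, mirroring the str2fun
# mapping used in main_test_digit_v13.py; channels=3 matches the default ConvNet.
def _demo_digit_testsets(channels=3):
    loaders = {'mnist': load_mnist, 'mnist_m': load_mnist_m, 'usps': load_usps,
               'svhn': load_svhn, 'syndigit': load_syndigit}
    return {name: fn('test', channels=channels) for name, fn in loaders.items()}
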
Meta-causal/code-stage1-pipeline/env.yaml
ADDED
|
@@ -0,0 +1,119 @@
name: Py3.7_torch1.8
channels:
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/bioconda/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
  - conda-forge
  - bioconda
  - defaults
dependencies:
  - _libgcc_mutex=0.1=main
  - asn1crypto=1.2.0=py37_0
  - blas=1.0=mkl
  - bottleneck=1.3.2=py37heb32a55_1
  - bzip2=1.0.8=h7b6447c_0
  - ca-certificates=2021.10.8=ha878542_0
  - cairo=1.14.12=h8948797_3
  - certifi=2021.10.8=py37h89c1867_1
  - cffi=1.13.0=py37h2e261b9_0
  - chardet=3.0.4=py37_1003
  - click=8.0.3=pyhd3eb1b0_0
  - conda-package-handling=1.6.0=py37h7b6447c_0
  - cryptography=2.8=py37h1ba5d50_0
  - ffmpeg=4.0=hcdf2ecd_0
  - fontconfig=2.13.0=h9420a91_0
  - freeglut=3.0.0=hf484d3e_5
  - freetype=2.11.0=h70c0345_0
  - glib=2.63.1=h5a9c865_0
  - graphite2=1.3.14=h23475e2_0
  - h5py=2.8.0=py37h3010b51_1003
  - harfbuzz=1.8.8=hffaf4a1_0
  - hdf5=1.10.2=hba1933b_1
  - icu=58.2=he6710b0_3
  - idna=2.8=py37_0
  - intel-openmp=2021.3.0=h06a4308_3350
  - jasper=2.0.14=hd8c5072_2
  - jpeg=9d=h7f8727e_0
  - libedit=3.1.20181209=hc058e9b_0
  - libffi=3.2.1=hd88cf55_4
  - libgcc-ng=9.1.0=hdf63c60_0
  - libgfortran-ng=7.5.0=ha8ba4b0_17
  - libgfortran4=7.5.0=ha8ba4b0_17
  - libglu=9.0.0=hf484d3e_1
  - libopencv=3.4.2=hb342d67_1
  - libopus=1.3.1=h7b6447c_0
  - libpng=1.6.37=hbc83047_0
  - libprotobuf=3.17.2=h4ff587b_1
  - libstdcxx-ng=9.1.0=hdf63c60_0
  - libtiff=4.1.0=h2733197_0
  - libuuid=1.0.3=h7f8727e_2
  - libvpx=1.7.0=h439df22_0
  - libxcb=1.14=h7b6447c_0
  - libxml2=2.9.9=hea5a465_1
  - mkl=2021.3.0=h06a4308_520
  - mkl-service=2.4.0=py37h7f8727e_0
  - mkl_fft=1.3.1=py37hd3c417c_0
  - mkl_random=1.2.2=py37h51133e4_0
  - ncurses=6.1=he6710b0_1
  - numexpr=2.7.3=py37h22e1b3c_1
  - numpy-base=1.21.2=py37h79a1101_0
  - opencv=3.4.2=py37h6fd60c2_1
  - openssl=1.1.1h=h516909a_0
  - pandas=1.3.3=py37h8c16a72_0
  - pcre=8.45=h295c915_0
  - pip=19.3.1=py37_0
  - pixman=0.40.0=h7f8727e_1
  - protobuf=3.17.2=py37h295c915_0
  - py-opencv=3.4.2=py37hb342d67_1
  - pycosat=0.6.3=py37h14c3975_0
  - pycparser=2.19=py37_0
  - pyopenssl=19.0.0=py37_0
  - pysocks=1.7.1=py37_0
  - python=3.7.4=h265db76_1
  - python-dateutil=2.8.2=pyhd3eb1b0_0
  - python_abi=3.7=2_cp37m
  - pytz=2021.3=pyhd3eb1b0_0
  - readline=7.0=h7b6447c_5
  - requests=2.22.0=py37_0
  - ruamel_yaml=0.15.46=py37h14c3975_0
  - scipy=1.7.1=py37h292c36d_2
  - setuptools=41.4.0=py37_0
  - six=1.12.0=py37_0
  - sqlite=3.30.0=h7b6447c_0
  - tensorboardx=2.2=pyhd3eb1b0_0
  - tk=8.6.8=hbc83047_0
  - tqdm=4.36.1=py_0
  - urllib3=1.24.2=py37_0
  - wheel=0.33.6=py37_0
  - xz=5.2.4=h14c3975_4
  - yaml=0.1.7=had09818_2
  - zlib=1.2.11=h7b6447c_3
  - zstd=1.3.7=h0b5b093_0
  - pip:
    - absl-py==1.0.0
    - cachetools==4.2.4
    - conda-pack==0.6.0
    - google-auth==2.3.3
    - google-auth-oauthlib==0.4.6
    - grpcio==1.42.0
    - importlib-metadata==4.8.2
    - markdown==3.3.6
    - numpy==1.21.3
    - oauthlib==3.1.1
    - pillow==8.4.0
    - pyasn1==0.4.8
    - pyasn1-modules==0.2.8
    - requests-oauthlib==1.3.0
    - rsa==4.8
    - tensorboard==2.7.0
    - tensorboard-data-server==0.6.1
    - tensorboard-plugin-wit==1.8.0
    - torch==1.8.1+cu111
    - torchvision==0.9.1+cu111
    - typing-extensions==3.10.0.2
    - werkzeug==2.0.2
    - zipp==3.6.0
prefix: /home/chenjin/miniconda3/envs/Py3.7_torch1.8
Meta-causal/code-stage1-pipeline/main_my_joint_v13_auto.py
ADDED
|
@@ -0,0 +1,279 @@
'''
Train the base model.
'''

import torch
import torch.nn as nn
import torch.nn.functional as F
import itertools
from torch import optim
from torch.utils.data import DataLoader, RandomSampler
from torchvision import models
from torchvision.datasets import CIFAR10
from torchvision.utils import make_grid
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.cuda.amp import autocast, GradScaler

import os
import click
import time
import numpy as np

from network import mnist_net_my as mnist_net
from network import wideresnet as wideresnet
from network import resnet as resnet
from network import adaptor_v2

from tools import causalaugment_v3 as causalaugment
import data_loader_joint_v3 as data_loader
# from utils import set_requires_grad

HOME = os.environ['HOME']

@click.command()
@click.option('--gpu', type=str, default='0', help='which GPU to use')
@click.option('--data', type=str, default='mnist', help='dataset name')
@click.option('--ntr', type=int, default=None, help='use only the first ntr training samples')
@click.option('--translate', type=float, default=None, help='random-translation data augmentation')
@click.option('--autoaug', type=str, default=None, help='AA FastAA RA')
@click.option('--n', type=int, default=3, help='how many factors are used when generating RA')
@click.option('--stride', type=int, default=5, help='if autoaug==CA_multiple, stride is used')
@click.option('--factor_num', type=int, default=16, help='the first n factors')
@click.option('--epochs', type=int, default=100)
@click.option('--nbatch', type=int, default=100, help='number of batches per epoch')
@click.option('--batchsize', type=int, default=128, help='number of samples per batch')
@click.option('--lr', type=float, default=1e-3)
@click.option('--lr_scheduler', type=str, default='none', help='learning-rate schedule (none/cosine/Exp/Step)')
@click.option('--svroot', type=str, default='./saved', help='directory where checkpoints and logs are saved')
@click.option('--clsadapt', type=bool, default=True, help='whether to apply the classification loss after mapping')
@click.option('--lambda_causal', type=float, default=1, help='weight of the causal loss')
@click.option('--lambda_re', type=float, default=1, help='weight of the reconstruction loss during mapping and causal learning')
@click.option('--randm', type=bool, default=True, help='whether m is sampled randomly')
@click.option('--randn', type=bool, default=False, help='whether the original features are detached')
@click.option('--network', type=str, default='resnet18', help='backbone architecture')
def experiment(gpu, data, ntr, translate, autoaug, n, stride, factor_num, epochs, nbatch, batchsize,
               lr, lr_scheduler, svroot, clsadapt, lambda_causal, lambda_re, randm, randn, network):
    settings = locals().copy()
    print(settings)

    # global setup
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    if not os.path.exists(svroot):
        os.makedirs(svroot)
    log_file = open(svroot + os.sep + 'log.log', "w")
    log_file.write(str(settings) + '\n')
    writer = SummaryWriter(svroot)

    # load dataset and model
    if data in ['mnist', 'mnist_t']:
        if data == 'mnist':
            trset = data_loader.load_mnist('train', translate=translate, twox=True, ntr=ntr, factor_num=factor_num,
                                           autoaug=autoaug, randm=randm, randn=randn, n=n, stride=stride)
        elif data == 'mnist_t':
            trset = data_loader.load_mnist_t('train', translate=translate, ntr=ntr)
        teset = data_loader.load_mnist('test')
        trloader = DataLoader(trset, batch_size=batchsize, num_workers=0,
                              sampler=RandomSampler(trset, True, nbatch * batchsize))
        teloader = DataLoader(teset, batch_size=batchsize, num_workers=0, shuffle=False)
        cls_net = mnist_net.ConvNet().cuda()

        parameter_list = []
        parameter_list.append({'params': cls_net.parameters(), 'lr': lr})
        opt = optim.Adam(parameter_list, lr=lr)
        if lr_scheduler == 'cosine':
            scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, epochs)
        elif lr_scheduler == 'Exp':
            scheduler = optim.lr_scheduler.ExponentialLR(opt, gamma=0.95)
        elif lr_scheduler == 'Step':
            scheduler = optim.lr_scheduler.StepLR(opt, step_size=int(epochs * 0.8))

    elif data == 'cifar10':
        # load the dataset
        trset = data_loader.load_cifar10(split='train', twox=True, factor_num=factor_num, autoaug=autoaug,
                                         randm=randm, randn=randn, n=n, stride=stride)
        teset = data_loader.load_cifar10(split='test')
        trloader = DataLoader(trset, batch_size=batchsize, num_workers=4, shuffle=True, drop_last=True)
        teloader = DataLoader(teset, batch_size=batchsize, num_workers=4, shuffle=False)
        cls_net = wideresnet.WideResNet(16, 10, 4).cuda()
        # cls_opt = optim.SGD(cls_net.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=5e-4)
        AdaptNet = []
        parameter_list = []
        for i in range(factor_num):
            mapping = adaptor_v2.mapping(256, 512, 256, 4).cuda()
            AdaptNet.append(mapping)
            parameter_list.append({'params': mapping.parameters(), 'lr': lr})
        if autoaug == 'CA_multiple':
            var_num = len(list(range(0, 31, stride)))
            E_to_W = adaptor_v2.effect_to_weight(10, 100, 1).cuda()
        else:
            E_to_W = adaptor_v2.effect_to_weight(10, 100, 1).cuda()
        parameter_list.append({'params': cls_net.parameters(), 'lr': lr})
        parameter_list.append({'params': E_to_W.parameters(), 'lr': lr})
        # opt = optim.Adam(parameter_list)
        opt = optim.SGD(parameter_list, lr=lr, momentum=0.9, nesterov=True, weight_decay=5e-4)
        if lr_scheduler == 'cosine':
            scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, epochs)
        elif lr_scheduler == 'Exp':
            scheduler = optim.lr_scheduler.ExponentialLR(opt, gamma=0.95)
        elif lr_scheduler == 'Step':
            scheduler = optim.lr_scheduler.StepLR(opt, step_size=int(epochs * 0.8))

    elif data in ['art_painting', 'cartoon', 'photo', 'sketch']:
        # load the dataset
        trset = data_loader.load_pacs(domain=data, split='train', twox=True, factor_num=factor_num, autoaug=autoaug,
                                      randm=randm, randn=randn, n=n, stride=stride)
        teset = data_loader.load_pacs(domain=data, split='val')
        trloader = DataLoader(trset, batch_size=batchsize, num_workers=4, shuffle=True, drop_last=True)
        teloader = DataLoader(teset, batch_size=batchsize, num_workers=4, shuffle=False)
        if network == 'resnet18':
            cls_net = resnet.resnet18(classes=7, c_dim=2048).cuda()

        classifier_param = list(map(id, cls_net.class_classifier.parameters()))
        backbone_param = filter(lambda p: id(p) not in classifier_param and p.requires_grad, cls_net.parameters())

        parameter_list = []
        parameter_list.append({'params': backbone_param, 'lr': 0.01 * lr})
        parameter_list.append({'params': cls_net.class_classifier.parameters(), 'lr': lr})

        opt = optim.SGD(parameter_list, momentum=0.9, nesterov=True, weight_decay=5e-4)
        if lr_scheduler == 'cosine':
            scheduler = optim.lr_scheduler.CosineAnnealingLR(opt, epochs)
        elif lr_scheduler == 'Exp':
            scheduler = optim.lr_scheduler.ExponentialLR(opt, gamma=0.99999)
        elif lr_scheduler == 'Step':
            scheduler = optim.lr_scheduler.StepLR(opt, step_size=15)

    cls_criterion = nn.CrossEntropyLoss()

    # start training
    best_acc = 0
    best_acc_t = 0
    scaler = GradScaler()
    for epoch in range(epochs):
        t1 = time.time()
        loss_list = []
        cls_net.train()
        print(len(trloader))
        for i, (x_four, y) in enumerate(trloader):
            x, x_RA, x_FA, x_CA, y = x_four[0].cuda(), x_four[1].cuda(), x_four[2].cuda(), x_four[3].cuda(), y.cuda()
            b, c, h, w = x.shape
            with autocast():
                p, f = cls_net(x)
                cls_loss = cls_criterion(p, y)
                loss = cls_loss

            opt.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(opt)
            scaler.update()
            loss_list.append(cls_loss.item())

        # adjust the learning rate
        if lr_scheduler in ['cosine', 'Exp', 'Step']:
            writer.add_scalar('scalar/lr', opt.param_groups[0]["lr"], epoch)
            print(opt.param_groups[0]["lr"])
            print("changing lr")
            scheduler.step()
        cls_loss = np.mean(loss_list)

        # evaluate and keep the best checkpoint
        cls_net.eval()
        if data in ['mnist', 'mnist_t', 'cifar10', 'mnistvis', 'art_painting', 'cartoon', 'photo', 'sketch']:
            teacc = evaluate(cls_net, teloader)

        if best_acc < teacc:
            print(f'---------------------saving model at epoch {epoch}----------------------------------------------------')
            log_file.write(f'saving model at epoch {epoch}\n')
            best_acc = teacc
            torch.save(cls_net.state_dict(), os.path.join(svroot, 'best_cls_net.pkl'))

        if ((epoch + 1) % 5 == 0):
            torch.save(cls_net.state_dict(), os.path.join(svroot, f'epoch{epoch}_cls_net.pkl'))

        # write logs
        t2 = time.time()
        print(f'epoch {epoch}, time {t2-t1:.2f}, cls_loss {cls_loss:.4f}')
        log_file.write(f'epoch {epoch}, time {t2-t1:.2f}, cls_loss {cls_loss:.4f}\n')
        writer.add_scalar('scalar/cls_loss', cls_loss, epoch)
        writer.add_scalar('scalar/teacc', teacc, epoch)

    print(f'---------------------saving last model at epoch {epoch}----------------------------------------------------')
    log_file.write(f'saving last model at epoch {epoch}\n')
    torch.save(cls_net.state_dict(), os.path.join(svroot, 'last_cls_net.pkl'))
    writer.close()


def evalute_pacs(source_domain, cls_net):
    cls_net.eval()
    data_total = ['art_painting', 'cartoon', 'photo', 'sketch']
    target = [i for i in data_total if i != source_domain]
    acc = np.zeros(len(target))
    for idx, data in enumerate(target):
        teset = data_loader.load_pacs(data, 'test')
        teloader = DataLoader(teset, batch_size=6, num_workers=0)
        # compute the metric
        acc[idx] = evaluate(cls_net, teloader)
    acc_avg = sum(acc) / len(target)
    return acc_avg, acc


def evaluate(net, teloader):
    ps = []
    ys = []
    for i, (x1, y1) in enumerate(teloader):
        with torch.no_grad():
            x1 = x1.cuda()
            p1, _ = net(x1, mode='fc')
            p1 = p1.argmax(dim=1)
            ps.append(p1.detach().cpu().numpy())
            ys.append(y1.numpy())
    # compute the metric
    ps = np.concatenate(ps)
    ys = np.concatenate(ys)
    acc = np.mean(ys == ps) * 100
    return acc


def extract_feature(net, teloader, savedir):
    ps = []
    ys = []
    for i, (x1, y1) in enumerate(teloader):
        img_class = y1[0].cpu().numpy()
        save_path = os.path.join(savedir, str(img_class))
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        with torch.no_grad():
            x1 = x1.cuda()
            p1, f1 = net(x1, mode='fc')
            save_name = save_path + os.sep + str(i) + '.npy'
            np.save(save_name, f1.cpu())
            p1 = p1.argmax(dim=1)
            ps.append(p1.detach().cpu().numpy())
            ys.append(y1.numpy())
    # compute the metric
    ps = np.concatenate(ps)
    ys = np.concatenate(ys)
    acc = np.mean(ys == ps) * 100
    return acc


if __name__ == '__main__':
    experiment()
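# --- illustrative only -------------------------------------------------------
# The training loop above uses torch.cuda.amp mixed precision.  The stripped
# down step below is a sketch of that pattern only; model, loader and opt are
# stand-ins (the model is assumed to return a (logits, features) pair like
# cls_net does), not objects from this repo.
def _demo_amp_step(model, loader, opt):
    from torch.cuda.amp import autocast, GradScaler
    criterion = nn.CrossEntropyLoss()
    scaler = GradScaler()
    for x, y in loader:
        x, y = x.cuda(), y.cuda()
        with autocast():                      # forward pass runs in mixed precision
            logits, _ = model(x)
            loss = criterion(logits, y)
        opt.zero_grad()
        scaler.scale(loss).backward()         # scale the loss to avoid fp16 underflow
        scaler.step(opt)                      # unscale gradients, then optimizer step
        scaler.update()
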
Meta-causal/code-stage1-pipeline/main_test_digit_v13.py
ADDED
|
@@ -0,0 +1,85 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

import os
import numpy as np
import click
import pandas as pd

from network import mnist_net_my as mnist_net
from network import adaptor_v2
from tools import causalaugment_v3 as causalaugment
from main_my_joint_v13_auto import evaluate
import data_loader_joint_v3 as data_loader

@click.command()
@click.option('--gpu', type=str, default='0', help='which GPU to use')
@click.option('--svroot', type=str, default='./saved')
@click.option('--svpath', type=str, default=None, help='where to save the result csv')
@click.option('--channels', type=int, default=3)
@click.option('--factor_num', type=int, default=16)
@click.option('--stride', type=int, default=16)
@click.option('--epoch', type=str, default='best')
@click.option('--eval_mapping', type=bool, default=True, help='whether to inspect the learned mapping')
def main(gpu, svroot, svpath, channels, factor_num, stride, epoch, eval_mapping):
    evaluate_digit(gpu, svroot, svpath, channels, factor_num, stride, epoch, eval_mapping)

def evaluate_digit(gpu, svroot, svpath, channels=3, factor_num=16, stride=5, epoch='best', eval_mapping=True):
    settings = locals().copy()
    print(settings)
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    # load the classification model
    if channels == 3:
        cls_net = mnist_net.ConvNet().cuda()
    elif channels == 1:
        cls_net = mnist_net.ConvNet(imdim=channels).cuda()
    if epoch == 'best':
        print("loading weight of %s" % (epoch))
        saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
    elif epoch == 'last':
        print("loading weight of %s" % (epoch))
        saved_weight = torch.load(os.path.join(svroot, 'last_cls_net.pkl'))
    cls_net.load_state_dict(saved_weight)
    cls_net.eval()

    # evaluation
    str2fun = {
        'mnist': data_loader.load_mnist,
        'mnist_m': data_loader.load_mnist_m,
        'usps': data_loader.load_usps,
        'svhn': data_loader.load_svhn,
        'syndigit': data_loader.load_syndigit,
    }

    columns = ['mnist', 'svhn', 'mnist_m', 'syndigit', 'usps']
    target = ['svhn', 'mnist_m', 'syndigit', 'usps']

    index = ['w/o do (original x)']
    data_result = {}

    for idx, data in enumerate(columns):
        teset = str2fun[data]('test', channels=channels)
        teloader = DataLoader(teset, batch_size=8, num_workers=0)
        # compute the metric
        teacc = evaluate(cls_net, teloader)
        if data == 'mnist':
            acc_avg = np.zeros(teacc.shape)   # the source domain is not counted in the average
        else:
            acc_avg = acc_avg + teacc
        data_result[data] = teacc
    acc_avg = acc_avg / float(len(target))

    data_result['Avg'] = acc_avg

    df = pd.DataFrame(data_result, index=index)
    print(df)
    if svpath is not None:
        df.to_csv(svpath)

if __name__ == '__main__':
    main()

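# --- illustrative only -------------------------------------------------------
# evaluate_digit can also be driven from Python rather than via click, e.g. to
# compare the best and last checkpoints of one run.  The svroot below is a
# placeholder path, not a directory that necessarily exists in this upload.
def _demo_eval_both_checkpoints(svroot='saved-digit/some_run'):
    for which in ('best', 'last'):
        evaluate_digit(gpu='0', svroot=svroot, svpath=None, epoch=which)
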
Meta-causal/code-stage1-pipeline/main_test_pacs_v13.py
ADDED
|
@@ -0,0 +1,89 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

import os
import numpy as np
import click
import pandas as pd

from network import resnet as resnet
from network import adaptor_v2
from tools import causalaugment_v3 as causalaugment
from main_my_joint_v13_auto import evaluate
import data_loader_joint_v3 as data_loader

@click.command()
@click.option('--gpu', type=str, default='0', help='which GPU to use')
@click.option('--svroot', type=str, default='./saved')
@click.option('--source_domain', type=str, default='art_painting', help='source domain')
@click.option('--svpath', type=str, default=None, help='where to save the result csv')
@click.option('--factor_num', type=int, default=16)
@click.option('--epoch', type=str, default='best')
@click.option('--stride', type=int, default=5)
@click.option('--eval_mapping', type=bool, default=False, help='whether to inspect the learned mapping')
@click.option('--network', type=str, default='resnet18', help='backbone architecture')
def main(gpu, svroot, source_domain, svpath, factor_num, epoch, stride, eval_mapping, network):
    evaluate_pacs(gpu, svroot, source_domain, svpath, factor_num, epoch, stride, eval_mapping, network)

def evaluate_pacs(gpu, svroot, source_domain, svpath, factor_num=16, epoch='best', stride=5, eval_mapping=False, network='resnet18'):
    settings = locals().copy()
    print(settings)
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    # load the classification model
    if network == 'resnet18':
        cls_net = resnet.resnet18(classes=7, c_dim=2048).cuda()
        input_dim = 2048
    if epoch == 'best':
        print("loading weight of %s" % (epoch))
        saved_weight = torch.load(os.path.join(svroot, 'best_cls_net.pkl'))
    elif epoch == 'last':
        print("loading weight of %s" % (epoch))
        saved_weight = torch.load(os.path.join(svroot, 'last_cls_net.pkl'))

    cls_net.load_state_dict(saved_weight)
    cls_net.eval()

    columns = ['art_painting', 'cartoon', 'photo', 'sketch']
    target = [i for i in columns if i != source_domain]
    columns = [source_domain] + target
    print("columns:", columns)

    index = ['w/o do (original x)']
    data_result = {}

    for idx, data in enumerate(columns):
        teset = data_loader.load_pacs(data, 'test')
        teloader = DataLoader(teset, batch_size=4, num_workers=0)
        # compute the metric
        teacc = evaluate(cls_net, teloader)
        if data == source_domain:
            acc_avg = np.zeros(teacc.shape)   # the source domain is not counted in the average
        else:
            acc_avg = acc_avg + teacc
        data_result[data] = teacc
    acc_avg = acc_avg / float(len(target))

    data_result['Avg'] = acc_avg

    df = pd.DataFrame(data_result, index=index)
    print(df)

    if svpath is not None:
        df.to_csv(svpath)

if __name__ == '__main__':
    main()

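# --- illustrative only -------------------------------------------------------
# Sketch of sweeping all four PACS source domains in one go; the checkpoint
# directory layout below is an assumption and must match wherever the training
# runs were actually saved.
def _demo_eval_all_sources(root='saved-PACS'):
    for src in ('art_painting', 'cartoon', 'photo', 'sketch'):
        svroot = os.path.join(root, src)
        evaluate_pacs(gpu='0', svroot=svroot, source_domain=src, svpath=None, epoch='best')
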
Meta-causal/code-stage1-pipeline/network/adaptor_v2.py
ADDED
|
@@ -0,0 +1,63 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class mapping(nn.Module):
    '''Simple feature-to-feature MLP adaptor (2 or 4 fully connected layers).'''
    def __init__(self, input_dim=1024, hidden_dim=512, out_dim=1024, layernum=4):
        super().__init__()
        self.layernum = layernum
        if layernum == 4:
            self.fc1 = nn.Linear(input_dim, hidden_dim)
            self.fc2 = nn.Linear(hidden_dim, hidden_dim)
            self.fc3 = nn.Linear(hidden_dim, hidden_dim)
            self.fc4 = nn.Linear(hidden_dim, out_dim)
        elif layernum == 2:
            self.fc1 = nn.Linear(input_dim, hidden_dim)
            self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        if self.layernum == 4:
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            x = self.relu(self.fc3(x))
            x = self.fc4(x)
        elif self.layernum == 2:
            x = self.relu(self.fc1(x))
            x = self.fc2(x)
        return x


class effect_to_weight(nn.Module):
    '''Small MLP that maps an effect vector to a scalar weight (2 or 3 layers).'''
    def __init__(self, input_dim=512, hidden_dim=256, out_dim=1, layernum=2, hidden_dim2=128):
        super().__init__()
        self.layernum = layernum
        if layernum == 2:
            self.fc1 = nn.Linear(input_dim, hidden_dim)
            self.fc2 = nn.Linear(hidden_dim, out_dim)
        elif layernum == 3:
            self.fc1 = nn.Linear(input_dim, hidden_dim)
            self.fc2 = nn.Linear(hidden_dim, hidden_dim2)
            self.fc3 = nn.Linear(hidden_dim2, out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        if self.layernum == 2:
            x = self.relu(self.fc1(x))
            x = self.fc2(x)
        else:
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            x = self.fc3(x)
        return x

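# --- illustrative only -------------------------------------------------------
# Shape check for the two adaptor modules, using the constructor arguments that
# main_my_joint_v13_auto.py passes (256-d features, 10-d effects); the batch
# size of 8 is arbitrary.
def _demo_adaptor_shapes():
    feats = torch.randn(8, 256)
    adapt = mapping(256, 512, 256, 4)        # feature -> feature, same width
    effects = torch.randn(8, 10)
    e2w = effect_to_weight(10, 100, 1)       # effect vector -> scalar weight
    print(adapt(feats).shape, e2w(effects).shape)   # torch.Size([8, 256]) torch.Size([8, 1])
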
Meta-causal/code-stage1-pipeline/network/mnist_net_my.py
ADDED
|
@@ -0,0 +1,104 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

class ConvNet(nn.Module):
    ''' Same network architecture as the M-ADA method (CVPR 2020). '''
    def __init__(self, imdim=3):
        super(ConvNet, self).__init__()

        self.conv1 = nn.Conv2d(imdim, 64, kernel_size=5, stride=1, padding=0)
        self.mp = nn.MaxPool2d(2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=0)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc1 = nn.Linear(128*5*5, 1024)
        self.relu3 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(1024, 1024)
        self.relu4 = nn.ReLU(inplace=True)

        self.cls_head_src = nn.Linear(1024, 10)
        # self.cls_head_tgt = nn.Linear(1024, 10)
        # self.pro_head = nn.Linear(1024, 128)

    def forward(self, x, mode='fc'):
        if mode == 'c':
            # x is already a (pre-ReLU) feature vector; only run the classifier head
            out4 = self.relu4(x)
            p = self.cls_head_src(out4)
            return p
        elif mode == 'fc':
            in_size = x.size(0)
            out1 = self.mp(self.relu1(self.conv1(x)))
            out2 = self.mp(self.relu2(self.conv2(out1)))
            out2 = out2.view(in_size, -1)
            out3 = self.relu3(self.fc1(out2))
            out4_worelu = self.fc2(out3)
            out4 = self.relu4(out4_worelu)
            p = self.cls_head_src(out4)
            return p, out4_worelu

        # if mode == 'test':
        #     p = self.cls_head_src(out4)
        #     return p
        # elif mode == 'train':
        #     p = self.cls_head_src(out4)
        #     # z = self.pro_head(out4)
        #     # z = F.normalize(z)
        #     return p, out4_worelu
        # elif mode == 'p_f':
        #     p = self.cls_head_src(out4)
        #     return p, out4
        # elif mode == 'target':
        #     p = self.cls_head_tgt(out4)
        #     z = self.pro_head(out4)
        #     z = F.normalize(z)
        #     return p, z


class ConvNetVis(nn.Module):
    ''' For visualization: the feature extractor outputs 2-d features. '''
    def __init__(self, imdim=3):
        super(ConvNetVis, self).__init__()

        self.conv1 = nn.Conv2d(imdim, 64, kernel_size=5, stride=1, padding=0)
        self.mp = nn.MaxPool2d(2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=0)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc1 = nn.Linear(128*5*5, 1024)
        self.relu3 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(1024, 2)
        self.relu4 = nn.ReLU(inplace=True)

        self.cls_head_src = nn.Linear(2, 10)
        self.cls_head_tgt = nn.Linear(2, 10)
        self.pro_head = nn.Linear(2, 128)

    def forward(self, x, mode='test'):
        in_size = x.size(0)
        out1 = self.mp(self.relu1(self.conv1(x)))
        out2 = self.mp(self.relu2(self.conv2(out1)))
        out2 = out2.view(in_size, -1)
        out3 = self.relu3(self.fc1(out2))
        out4 = self.relu4(self.fc2(out3))

        if mode == 'test':
            p = self.cls_head_src(out4)
            return p
        elif mode == 'train':
            p = self.cls_head_src(out4)
            z = self.pro_head(out4)
            z = F.normalize(z)
            return p, z
        elif mode == 'p_f':
            p = self.cls_head_src(out4)
            return p, out4
        # elif mode == 'target':
        #     p = self.cls_head_tgt(out4)
        #     z = self.pro_head(out4)
        #     z = F.normalize(z)
        #     return p, z

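# --- illustrative only -------------------------------------------------------
# Shape walkthrough for ConvNet with the 32x32 digit inputs used elsewhere in
# this repo: 32 -> conv5 -> 28 -> maxpool -> 14 -> conv5 -> 10 -> maxpool -> 5,
# which is why fc1 expects 128*5*5 inputs.
def _demo_convnet_shapes():
    net = ConvNet(imdim=3)
    x = torch.randn(2, 3, 32, 32)
    p, feat = net(x, mode='fc')
    print(p.shape, feat.shape)   # torch.Size([2, 10]) torch.Size([2, 1024])
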
Meta-causal/code-stage1-pipeline/network/resnet.py
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
from torch import nn
|
| 2 |
+
from torch.utils import model_zoo
|
| 3 |
+
#from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
|
| 4 |
+
from torchvision.models.resnet import BasicBlock, Bottleneck
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import ssl
|
| 8 |
+
# from torch import nn as nn
|
| 9 |
+
# from utils.util import *
|
| 10 |
+
|
| 11 |
+
ssl._create_default_https_context = ssl._create_unverified_context
|
| 12 |
+
|
| 13 |
+
all = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101','resnet152']
|
| 14 |
+
|
| 15 |
+
model_urls = {
|
| 16 |
+
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
|
| 17 |
+
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
|
| 18 |
+
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
|
| 19 |
+
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
|
| 20 |
+
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ResNet(nn.Module):
|
| 25 |
+
def __init__(self, block, layers,classes=7,c_dim=512):
|
| 26 |
+
self.inplanes = 64
|
| 27 |
+
super(ResNet, self).__init__()
|
| 28 |
+
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
|
| 29 |
+
bias=False)
|
| 30 |
+
self.bn1 = nn.BatchNorm2d(64)
|
| 31 |
+
self.relu = nn.ReLU(inplace=True)
|
| 32 |
+
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
|
| 33 |
+
self.layer1 = self._make_layer(block, 64, layers[0])
|
| 34 |
+
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
|
| 35 |
+
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
|
| 36 |
+
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
|
| 37 |
+
self.avgpool = nn.AvgPool2d(7, stride=1)
|
| 38 |
+
self.class_classifier = nn.Linear(c_dim, classes)
|
| 39 |
+
for m in self.modules():
|
| 40 |
+
if isinstance(m, nn.Conv2d):
|
| 41 |
+
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
|
| 42 |
+
elif isinstance(m, nn.BatchNorm2d):
|
| 43 |
+
nn.init.constant_(m.weight, 1)
|
| 44 |
+
nn.init.constant_(m.bias, 0)
|
| 45 |
+
|
| 46 |
+
def _make_layer(self, block, planes, blocks, stride=1):
|
| 47 |
+
downsample = None
|
| 48 |
+
if stride != 1 or self.inplanes != planes * block.expansion:
|
| 49 |
+
downsample = nn.Sequential(
|
| 50 |
+
nn.Conv2d(self.inplanes, planes * block.expansion,
|
| 51 |
+
kernel_size=1, stride=stride, bias=False),
|
| 52 |
+
nn.BatchNorm2d(planes * block.expansion),
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
layers = []
|
| 56 |
+
layers.append(block(self.inplanes, planes, stride, downsample))
|
| 57 |
+
self.inplanes = planes * block.expansion
|
| 58 |
+
for i in range(1, blocks):
|
| 59 |
+
layers.append(block(self.inplanes, planes))
|
| 60 |
+
|
| 61 |
+
return nn.Sequential(*layers)
|
| 62 |
+
def forward(self, x, mode='fc'):
|
| 63 |
+
if mode == 'c':
|
| 64 |
+
return self.class_classifier(x)
|
| 65 |
+
else:
|
| 66 |
+
x = self.conv1(x)
|
| 67 |
+
x = self.bn1(x)
|
| 68 |
+
x = self.relu(x)
|
| 69 |
+
x = self.maxpool(x)
|
| 70 |
+
|
| 71 |
+
x = self.layer1(x)
|
| 72 |
+
x = self.layer2(x)
|
| 73 |
+
x = self.layer3(x)
|
| 74 |
+
x = self.layer4(x)
|
| 75 |
+
x = self.avgpool(x)
|
| 76 |
+
x = x.view(x.size(0), -1)
|
| 77 |
+
# print("x.shape:",x.shape)
|
| 78 |
+
return self.class_classifier(x), x
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def resnet18(pretrained=True, **kwargs):
|
| 82 |
+
"""Constructs a ResNet-18 model.
|
| 83 |
+
Args:
|
| 84 |
+
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
| 85 |
+
"""
|
| 86 |
+
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
|
| 87 |
+
if pretrained:
|
| 88 |
+
print("-------------------------------------loading pretrain weights----------------------------------")
|
| 89 |
+
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
|
| 90 |
+
return model
|
| 91 |
+
|
| 92 |
+
def resnet50(pretrained=True, **kwargs):
|
| 93 |
+
"""Constructs a ResNet-50 model.
|
| 94 |
+
Args:
|
| 95 |
+
pretrained (bool): If True, returns a model pre-trained on ImageNet
|
| 96 |
+
"""
|
| 97 |
+
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
|
| 98 |
+
if pretrained:
|
| 99 |
+
print("-------------------------------------loading pretrain weights----------------------------------")
|
| 100 |
+
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
|
| 101 |
+
return model
|
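For reference, a minimal sketch of how this wrapper might be instantiated for the 7-class PACS setting targeted by the scripts below; the import path, input size, and pretrained=False (used here only to skip the download; the factories default to pretrained=True) are assumptions, not part of the upload. With BasicBlock the flattened feature is 512-d, so the default c_dim matches; resnet50 would presumably need c_dim=2048.

# Hypothetical sketch: forward modes of the ResNet wrapper defined above.
import torch
from network.resnet import resnet18  # import path assumed from this repo layout

model = resnet18(pretrained=False, classes=7, c_dim=512)
x = torch.randn(2, 3, 224, 224)          # AvgPool2d(7) assumes 224x224 inputs
logits, feat = model(x)                  # default mode: class logits and 512-d features
logits_again = model(feat, mode='c')     # 'c': classify an externally adapted feature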
Meta-causal/code-stage1-pipeline/network/wideresnet.py
ADDED
@@ -0,0 +1,86 @@
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)


class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(int(nb_layers)):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x, mode='fc'):
        # mode 'c': classify an already-pooled feature vector;
        # otherwise run the full network and return (logits, features).
        if mode == 'c':
            return self.fc(x)
        else:
            out = self.conv1(x)
            out = self.block1(out)
            out = self.block2(out)
            out = self.block3(out)
            out = self.relu(self.bn1(out))
            out = F.avg_pool2d(out, 8)
            out = out.view(-1, self.nChannels)
            return self.fc(out), out
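A similar sketch for the WideResNet variant; the depth/widen_factor values and 32x32 input size are illustrative assumptions (the hard-coded F.avg_pool2d(out, 8) expects 8x8 final maps, i.e. CIFAR-sized inputs), and the pooled feature width is 64*widen_factor.

# Hypothetical sketch: a WRN-16-4 on 32x32 inputs.
import torch
from network.wideresnet import WideResNet  # import path assumed from this repo layout

net = WideResNet(depth=16, num_classes=10, widen_factor=4)
x = torch.randn(2, 3, 32, 32)
logits, feat = net(x)               # default mode: logits and 256-d pooled features
logits_again = net(feat, mode='c')  # 'c': classify a pooled feature directly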
Meta-causal/code-stage1-pipeline/run_PACS/run_my_joint_v13_test.sh
ADDED
@@ -0,0 +1,35 @@

# $1 gpuid
# $2 runid

# base method
cd ..
epochs=30
clsadapt=True
lr=0.01
factor_num=16
lr_scheduler=cosine
lambda_causal=1
lambda_re=1
batchsize=6
stride=5
randm=True
randn=True
autoaug=CA_multiple
network=resnet18
UniqueExpName=pipelineAugWoNorm

root=/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-PACS/
data=art_painting
svroot=$root/${data}/${autoaug}_${factor_num}fa_v2_ep${epochs}_lr${lr}_${lr_scheduler}_base0.01_bs${batchsize}_lamCa_${lambda_causal}_lamRe${lambda_re}_adt4_cls1_EW2_70_rm${randm}_rn${randn}_str${stride}_${UniqueExpName}
#python3 main_my_joint_v13_auto.py --gpu $1 --data ${data} --epochs $epochs --autoaug $autoaug --lambda_causal ${lambda_causal} --lambda_re ${lambda_re} --lr $lr --svroot $svroot --clsadapt $clsadapt --factor_num $factor_num --lr_scheduler ${lr_scheduler} --batchsize ${batchsize} --network ${network} --randm ${randm} --randn ${randn} --stride ${stride}

test_epoch=best
#python3 main_test_pacs_v13.py --gpu $1 --source_domain $data --svroot $svroot --svpath $svroot/${data}_${factor_num}factor_${test_epoch}_test_check.csv --factor_num $factor_num --epoch $test_epoch --network ${network} --stride ${stride}

python3 AllEpochs_test_pacs_v13.py --gpu $1 --source_domain $data --svroot $svroot --svpath $svroot/${data}_${factor_num}factor_${test_epoch}_test_check.csv --factor_num $factor_num --epoch $test_epoch --network ${network} --stride ${stride}
Meta-causal/code-stage1-pipeline/run_digits/run_my_joint_test.sh
ADDED
@@ -0,0 +1,34 @@

# $1 gpuid

cd ..
epochs=100
clsadapt=True
lr=1e-4
lr_scheduler=Step
factor_num=14
test_epoch=best
lambda_causal=1
lambda_re=1
batchsize=32
stride=3
randm=True
randn=True
autoaug=CA_multiple
UniqueExpName='pipelineAugWoNorm'


root=/data/work-gcp-europe-west4-a/yuqian_fu/datasets/SingleSourceDG/saved-digit
svroot=$root/${autoaug}_${factor_num}fa_all_ep${epochs}_lr${lr}_lr_scheduler${lr_scheduler}0.8_bs${batchsize}_lamCa_${lambda_causal}_lamRe_${lambda_re}_cls1_adt2_EW2_100_rm${randm}_rn${randn}_str${stride}_${UniqueExpName}

#python3 main_my_joint_v13_auto.py --gpu $1 --data mnist --epochs $epochs --autoaug $autoaug --lambda_causal ${lambda_causal} --lambda_re ${lambda_re} --lr $lr --lr_scheduler $lr_scheduler --svroot $svroot --clsadapt $clsadapt --factor_num $factor_num --batchsize ${batchsize} --randm ${randm} --randn ${randn} --stride ${stride}

#python3 main_test_digit_v13.py --gpu $1 --svroot $svroot --svpath $svroot/${factor_num}factor_${test_epoch}.csv --factor_num $factor_num --epoch $test_epoch --stride ${stride}

python3 AllEpochs_test_digit_v13.py --gpu $1 --svroot $svroot --svpath $svroot/${factor_num}factor_${test_epoch}.csv --factor_num $factor_num --epoch $test_epoch --stride ${stride}
Meta-causal/code-stage1-pipeline/saved-PACS/art_painting/CA_multiple_16fa_v2_ep70_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/events.out.tfevents.1719926752.hala
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a44a49f0a1b3c59b9763c67ea85708ef8b56cae5fe4336f0383f5f71ba0dac84
size 40
Meta-causal/code-stage1-pipeline/saved-PACS/art_painting/CA_multiple_16fa_v2_ep70_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5/log.log
ADDED
@@ -0,0 +1 @@
{'gpu': '0', 'data': 'art_painting', 'ntr': None, 'translate': None, 'autoaug': 'CA_multiple', 'n': 3, 'stride': 5, 'factor_num': 16, 'epochs': 70, 'nbatch': 100, 'batchsize': 6, 'lr': 0.01, 'lr_scheduler': 'cosine', 'svroot': 'saved-PACS//art_painting/CA_multiple_16fa_v2_ep70_lr0.01_cosine_base0.01_bs6_lamCa_1_lamRe1_adt4_cls1_EW2_70_rmTrue_rnTrue_str5', 'clsadapt': True, 'lambda_causal': 1.0, 'lambda_re': 1.0, 'randm': True, 'randn': True, 'network': 'resnet18'}
Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925086.hala
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7946f93077ec2136f75fc090a5762ce810be71cc78d5201e8a671217a678c563
size 40
Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925314.hala
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b2021c61739fbe1f9c066067b4e5903d8d2d6c1c44865e1e9c61449eb3d90327
size 40
Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/events.out.tfevents.1719925652.hala
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fafb4b17d350157735eb6174ff44bafcea7ab8bf86948df3421447ef45ffcae3
size 40
Meta-causal/code-stage1-pipeline/saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3/log.log
ADDED
@@ -0,0 +1 @@
{'gpu': '0', 'data': 'mnist', 'ntr': None, 'translate': None, 'autoaug': 'CA_multiple', 'n': 3, 'stride': 3, 'factor_num': 14, 'epochs': 500, 'nbatch': 100, 'batchsize': 32, 'lr': 0.0001, 'lr_scheduler': 'Step', 'svroot': 'saved-digit/CA_multiple_14fa_all_ep500_lr1e-4_lr_schedulerStep0.8_bs32_lamCa_1_lamRe_1_cls1_adt2_EW2_100_rmTrue_rnTrue_str3', 'clsadapt': True, 'lambda_causal': 1.0, 'lambda_re': 1.0, 'randm': True, 'randn': True, 'network': 'resnet18'}