From 28ee6a99f1f6b0d1b27f48cbddd7e975721540a2 Mon Sep 17 00:00:00 2001
From: Alonso García
Date: Wed, 11 Aug 2021 02:15:14 -0500
Subject: [PATCH 1/4] utils and dataset implemented

---
 core/dataset.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/core/dataset.py b/core/dataset.py
index 948849d..e058a31 100644
--- a/core/dataset.py
+++ b/core/dataset.py
@@ -1,5 +1,6 @@
 import numpy as np
-import scipy.misc
+#import scipy.misc
+import imageio
 import os
 from PIL import Image
 from torchvision import transforms
@@ -25,11 +26,12 @@ def __init__(self, root, is_train=True, data_len=None):
         train_file_list = [x for i, x in zip(train_test_list, img_name_list) if i]
         test_file_list = [x for i, x in zip(train_test_list, img_name_list) if not i]
         if self.is_train:
-            self.train_img = [scipy.misc.imread(os.path.join(self.root, 'images', train_file)) for train_file in
+            # replaced scipy.misc.imread with imageio.imread
+            self.train_img = [imageio.imread(os.path.join(self.root, 'images', train_file)) for train_file in
                               train_file_list[:data_len]]
             self.train_label = [x for i, x in zip(train_test_list, label_list) if i][:data_len]
         if not self.is_train:
-            self.test_img = [scipy.misc.imread(os.path.join(self.root, 'images', test_file)) for test_file in
+            self.test_img = [imageio.imread(os.path.join(self.root, 'images', test_file)) for test_file in
                              test_file_list[:data_len]]
             self.test_label = [x for i, x in zip(train_test_list, label_list) if not i][:data_len]
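Note on patch 1: scipy.misc.imread was removed in SciPy 1.2, and imageio.imread is the
usual drop-in replacement for this call, since both return the decoded image as a numpy
array. A minimal sketch of the equivalence, assuming imageio v2 is installed; the image
path is hypothetical and only for illustration:

    import imageio
    import numpy as np

    # imageio.imread returns an Array that subclasses numpy.ndarray, so the
    # downstream numpy / PIL handling in dataset.py keeps working unchanged.
    img = imageio.imread('images/sample.jpg')   # hypothetical path
    print(img.shape, img.dtype)                 # e.g. (600, 800, 3) uint8 for an RGB file
    assert isinstance(img, np.ndarray)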
From b5ab1123e7c279f29df5e49252200b66f2b31e09 Mon Sep 17 00:00:00 2001
From: 8kta
Date: Mon, 23 Aug 2021 05:48:16 +0000
Subject: [PATCH 2/4] debugging in debian

---
 core/anchors.py |  0
 core/dataset.py |  0
 core/model.py   |  0
 core/resnet.py  |  0
 core/utils.py   | 15 +++++++++++++++
 train.py        |  6 ++++--
 6 files changed, 19 insertions(+), 2 deletions(-)
 mode change 100644 => 100755 core/anchors.py
 mode change 100644 => 100755 core/dataset.py
 mode change 100644 => 100755 core/model.py
 mode change 100644 => 100755 core/resnet.py
 mode change 100644 => 100755 core/utils.py
 mode change 100644 => 100755 train.py

diff --git a/core/anchors.py b/core/anchors.py
old mode 100644
new mode 100755
diff --git a/core/dataset.py b/core/dataset.py
old mode 100644
new mode 100755
diff --git a/core/model.py b/core/model.py
old mode 100644
new mode 100755
diff --git a/core/resnet.py b/core/resnet.py
old mode 100644
new mode 100755
diff --git a/core/utils.py b/core/utils.py
old mode 100644
new mode 100755
index 536c166..399f619
--- a/core/utils.py
+++ b/core/utils.py
@@ -5,6 +5,7 @@
 import logging

 _, term_width = os.popen('stty size', 'r').read().split()
+#term_width = 80
 term_width = int(term_width)
 TOTAL_BAR_LENGTH = 40.
@@ -100,5 +101,19 @@ def init_log(output_dir):
     logging.getLogger('').addHandler(console)
     return logging

+# added to create the output folders with the right permissions
+def create_dir(dir_str):
+
+    try:
+        pardir = os.path.abspath(f'{dir_str}')
+        original_umask = os.umask(0o000)
+        os.makedirs(pardir, exist_ok=True)
+
+    finally:
+        os.umask(original_umask)
+        print(f'{dir_str} folder created:', os.path.isdir(pardir))
+
+    return pardir
+
 if __name__ == '__main__':
     pass
diff --git a/train.py b/train.py
old mode 100644
new mode 100755
index 47a5524..df3e2f0
--- a/train.py
+++ b/train.py
@@ -5,14 +5,16 @@
 from torch.optim.lr_scheduler import MultiStepLR
 from config import BATCH_SIZE, PROPOSAL_NUM, SAVE_FREQ, LR, WD, resume, save_dir
 from core import model, dataset
-from core.utils import init_log, progress_bar
+from core.utils import init_log, progress_bar, create_dir

 os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
 start_epoch = 1
 save_dir = os.path.join(save_dir, datetime.now().strftime('%Y%m%d_%H%M%S'))
 if os.path.exists(save_dir):
     raise NameError('model dir exists!')
-os.makedirs(save_dir)
+
+# use the create_dir helper added to core/utils.py
+save_dir = create_dir('save_dir')
 logging = init_log(save_dir)
 _print = logging.info
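Note on patch 2: create_dir wraps os.makedirs in an os.umask(0o000) / restore pair so the
output folder is created with open permissions even under a restrictive shell umask (the
"debugging in debian" problem). A minimal sketch of that pattern, independent of the
repository code; the function name and path below are examples only:

    import os

    def make_open_dir(path):
        # Clear the process umask so the mode passed to makedirs is applied as-is,
        # then restore the previous umask no matter what happens.
        old_umask = os.umask(0o000)
        try:
            os.makedirs(path, mode=0o777, exist_ok=True)
        finally:
            os.umask(old_umask)
        return os.path.abspath(path)

    save_dir = make_open_dir('save_dir')   # mirrors the call in train.py
    print(save_dir, os.path.isdir(save_dir))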
From b3acdc0d06754e6b3eb5624e538bfb25c68a2cc0 Mon Sep 17 00:00:00 2001
From: 8kta
Date: Sat, 4 Sep 2021 20:27:59 +0000
Subject: [PATCH 3/4] starting configuring dataset

---
 Notebook.py                              |  21 ++++++
 ShuffleMNIST                             |   1 +
 __pycache__/config.cpython-39.pyc        | Bin 0 -> 374 bytes
 core/__init__.py                         |   0
 core/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 142 bytes
 core/__pycache__/anchors.cpython-39.pyc  | Bin 0 -> 3192 bytes
 core/__pycache__/dataset.cpython-39.pyc  | Bin 0 -> 3228 bytes
 core/__pycache__/model.cpython-39.pyc    | Bin 0 -> 4530 bytes
 core/__pycache__/resnet.cpython-39.pyc   | Bin 0 -> 6094 bytes
 core/__pycache__/utils.cpython-39.pyc    | Bin 0 -> 2657 bytes
 save_dir/log.log                         |   1 +
 train.py                                 |  91 ++++++++++++++++++++---
 12 files changed, 104 insertions(+), 10 deletions(-)
 create mode 100644 Notebook.py
 create mode 160000 ShuffleMNIST
 create mode 100644 __pycache__/config.cpython-39.pyc
 create mode 100644 core/__init__.py
 create mode 100644 core/__pycache__/__init__.cpython-39.pyc
 create mode 100644 core/__pycache__/anchors.cpython-39.pyc
 create mode 100644 core/__pycache__/dataset.cpython-39.pyc
 create mode 100644 core/__pycache__/model.cpython-39.pyc
 create mode 100644 core/__pycache__/resnet.cpython-39.pyc
 create mode 100644 core/__pycache__/utils.cpython-39.pyc
 create mode 100644 save_dir/log.log

diff --git a/Notebook.py b/Notebook.py
new file mode 100644
index 0000000..cd8149d
--- /dev/null
+++ b/Notebook.py
@@ -0,0 +1,21 @@
+# Function to create directories; is the abspath call really needed?
+##### added to core/utils.py
+import os
+
+def create_dir(child_dir_str):
+
+    try:
+        pardir = os.path.abspath('static/')
+        original_umask = os.umask(0o000)
+        os.makedirs(pardir, exist_ok=True)
+        child_dir = os.path.join(pardir, child_dir_str)
+        child_dir = os.path.abspath(child_dir)
+        os.makedirs(child_dir, exist_ok=True)
+    finally:
+        os.umask(original_umask)
+        print('static folder created:', os.path.isdir(pardir))
+        print('img folder created: ', os.path.isdir(child_dir))
+
+#create_dir('img')
+
+#######################################################
\ No newline at end of file
diff --git a/ShuffleMNIST b/ShuffleMNIST
new file mode 160000
index 0000000..e4aa4da
--- /dev/null
+++ b/ShuffleMNIST
@@ -0,0 +1 @@
+Subproject commit e4aa4da84c2cb772867a1b142472890dca80e17e

[GIT binary patches omitted: this commit also adds the empty core/__init__.py and the
 compiled bytecode files __pycache__/config.cpython-39.pyc and
 core/__pycache__/{__init__,anchors,dataset,model,resnet,utils}.cpython-39.pyc; the
 base85 payloads carry no reviewable content.]
diff --git a/save_dir/log.log b/save_dir/log.log
new file mode 100644
index 0000000..c278092
--- /dev/null
+++ b/save_dir/log.log
@@ -0,0 +1 @@
+20210904-20:25:00 ----------------------------------------------------------------------------------------------------
diff --git a/train.py b/train.py
index df3e2f0..b8411b6 100755
--- a/train.py
+++ b/train.py
@@ -1,4 +1,6 @@
+#from datasets.ShuffleMNIST.dataset import ShuffleMNIST
 import os
+import numpy as np
 import torch.utils.data
 from torch.nn import DataParallel
 from datetime import datetime
@@ -7,6 +9,13 @@
 from core import model, dataset
 from core.utils import init_log, progress_bar, create_dir

+import torchvision
+from torchvision.utils import save_image
+from torchvision import transforms
+
+from ShuffleMNIST import dataset as Shuffdata
+
+
 os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
 start_epoch = 1
 save_dir = os.path.join(save_dir, datetime.now().strftime('%Y%m%d_%H%M%S'))
@@ -19,12 +28,48 @@
 _print = logging.info

 # read dataset
-trainset = dataset.CUB(root='./CUB_200_2011', is_train=True, data_len=None)
-trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
-                                          shuffle=True, num_workers=8, drop_last=False)
-testset = dataset.CUB(root='./CUB_200_2011', is_train=False, data_len=None)
-testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
-                                         shuffle=False, num_workers=8, drop_last=False)
+#trainset = dataset.CUB(root='./CUB_200_2011', is_train=True, data_len=None)
+#trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
+#                                          shuffle=True, num_workers=8, drop_last=False)
+#testset = dataset.CUB(root='./CUB_200_2011', is_train=False, data_len=None)
+#testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
+#                                         shuffle=False, num_workers=8, drop_last=False)
+
+# read dataset
+batch_size_train = 64
+batch_size_test = 1000
+
+
+dataset_train = torchvision.datasets.MNIST('/home/alessio/alonso/datasets', train=True, download=True,
+                                           transform=torchvision.transforms.ToTensor())
+
+dataset_test = torchvision.datasets.MNIST('/home/alessio/alonso/datasets', train=False,
+                                          download=True, transform=torchvision.transforms.ToTensor())
+
+train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size_train, drop_last=True, shuffle=True)
+test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size_test, shuffle=True, drop_last=True)
+
+shuffled_train = Shuffdata.ShuffleMNIST(train_loader, anchors=[], num=4, radius=42, wall_shape=112, sum=True, is_train=True)
+shuffled_test = Shuffdata.ShuffleMNIST(test_loader, anchors=[], num=4, radius=42, wall_shape=112, sum=True, is_train=False)
+
+print('There are {} images and {} labels in the train set.'.format(len(shuffled_train.train_img),
+                                                                    len(shuffled_train.train_label)))
+print('There are {} images and {} labels in the test set.'.format(len(shuffled_test.test_img),
+                                                                   len(shuffled_test.test_label)))
+
+# Configuring the shuffled DataLoader
+from torch.utils.data.sampler import RandomSampler
+
+# renamed to trainloader/testloader so they match the names the training loop expects
+train_sampler = RandomSampler(shuffled_train, replacement=True, num_samples=51200, generator=None)
+test_sampler = RandomSampler(shuffled_test, replacement=True, num_samples=5760, generator=None)
+
+trainloader = torch.utils.data.DataLoader(shuffled_train, batch_size=batch_size_train,
+                                          drop_last=False, sampler=train_sampler)
+
+testloader = torch.utils.data.DataLoader(shuffled_test, batch_size=batch_size_train,
+                                         drop_last=False, sampler=test_sampler)
+
 # define model
 net = model.attention_net(topN=PROPOSAL_NUM)
 if resume:
@@ -49,6 +94,10 @@
              MultiStepLR(partcls_optimizer, milestones=[60, 100], gamma=0.1)]
 net = net.cuda()
 net = DataParallel(net)
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+# avoid the scheduler warning and make sure the first learning-rate value is actually used
+# lr_scheduler.MultiStepLR()

 for epoch in range(start_epoch, 500):
     for scheduler in schedulers:
         scheduler.step()
@@ -57,14 +106,24 @@
     # begin training
     _print('--' * 50)
     net.train()
-    for i, data in enumerate(trainloader):
-        img, label = data[0].cuda(), data[1].cuda()
+    for batch_idx, (data_, target_) in enumerate(trainloader):
+        # changed the batch unpacking syntax here
+        img, label = data_, target_.to(device)
         batch_size = img.size(0)
         raw_optimizer.zero_grad()
         part_optimizer.zero_grad()
         concat_optimizer.zero_grad()
         partcls_optimizer.zero_grad()
+
+        # transform the data
+        if len(img.shape) == 3:
+            img = np.stack([img] * 3, 2)
+        img = np.transpose(img, (0,1,2,3))
+        img = torch.as_tensor(img)
+        data_ = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
+        img = data_.to(device)
+
         raw_logits, concat_logits, part_logits, _, top_n_prob = net(img)
         part_loss = model.list_loss(part_logits.view(batch_size * PROPOSAL_NUM, -1),
                                     label.unsqueeze(1).repeat(1, PROPOSAL_NUM).view(-1)).view(batch_size, PROPOSAL_NUM)
@@ -87,9 +146,21 @@
         train_correct = 0
         total = 0
         net.eval()
-        for i, data in enumerate(trainloader):
+        for batch_idx_t, (data_t, target_t) in enumerate(trainloader):
             with torch.no_grad():
-                img, label = data[0].cuda(), data[1].cuda()
+                img, label = data_t, target_t.to(device)
+
+                # transform the data
+                img = np.array(data_t)
+                #print(img.shape)
+                if len(img.shape) == 3:
+                    img = np.stack([img] * 3, 2)
+
+                img = np.transpose(img, (0,2,1,3))
+                img = torch.as_tensor(img)
+                data_t = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
+                img = data_t.to(device)
+
                 batch_size = img.size(0)
                 _, concat_logits, _, _, _ = net(img)
                 # calculate loss
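Note on patch 3: both new loaders are driven by RandomSampler(replacement=True,
num_samples=...), so one "epoch" is a fixed number of draws with replacement (51200 for
train, 5760 for test) rather than a single pass over the dataset. A self-contained sketch
of that behaviour on a dummy dataset (the sizes below are illustrative, not the ones used
above):

    import torch
    from torch.utils.data import DataLoader, TensorDataset, RandomSampler

    dummy = TensorDataset(torch.arange(10).float().unsqueeze(1), torch.arange(10))

    # With replacement=True the sampler yields num_samples indices per epoch,
    # possibly repeating items, independently of len(dummy).
    sampler = RandomSampler(dummy, replacement=True, num_samples=32)
    loader = DataLoader(dummy, batch_size=8, sampler=sampler, drop_last=False)

    print(len(dummy), len(loader))   # 10 items in the dataset, but 32 / 8 = 4 batches per epoch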
From d221e6eccb506ce0a3a40ba76b86fc4470d08ab2 Mon Sep 17 00:00:00 2001
From: 8kta
Date: Wed, 22 Sep 2021 00:50:14 +0000
Subject: [PATCH 4/4] Debugging test.py, sizes don't fit

---
 core/__pycache__/dataset.cpython-39.pyc | Bin 3228 -> 3228 bytes
 core/__pycache__/utils.cpython-39.pyc   | Bin 2657 -> 2996 bytes
 core/dataset.py                         |   1 +
 core/utils.py                           |  12 ++
 save_dir/log.log                        |   1 -
 train.py                                | 214 ++++++++++++++----------
 6 files changed, 141 insertions(+), 87 deletions(-)
 delete mode 100644 save_dir/log.log

[GIT binary patch deltas omitted for the stale committed bytecode files
 core/__pycache__/dataset.cpython-39.pyc and core/__pycache__/utils.cpython-39.pyc.]

diff --git a/core/dataset.py b/core/dataset.py
index e058a31..266ffde 100755
--- a/core/dataset.py
+++ b/core/dataset.py
@@ -55,6 +55,7 @@ def __getitem__(self, index):
             img = transforms.Resize((600, 600), Image.BILINEAR)(img)
             img = transforms.CenterCrop(INPUT_SIZE)(img)
             img = transforms.ToTensor()(img)
+            img = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)

         return img, target

diff --git a/core/utils.py b/core/utils.py
index 399f619..2ff95b6 100755
--- a/core/utils.py
+++ b/core/utils.py
@@ -4,6 +4,18 @@
 import time
 import logging

+import time
+
+def timer(func):
+    def wrapper(*args, **kwargs):
+        start = time.time()
+        rv = func(*args, **kwargs)
+        total = time.time() - start
+        print(f'Total: {total}')
+        return rv
+    return wrapper
+
+
 _, term_width = os.popen('stty size', 'r').read().split()
 #term_width = 80
 term_width = int(term_width)
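Note on patch 4: the timer helper added to core/utils.py is a plain wall-clock decorator;
train.py applies it to the whole training function further down, so the total runtime is
printed once the call returns. A minimal usage sketch of the same pattern (the toy
function below is illustrative only):

    import time

    def timer(func):
        # Measure wall-clock time around a single call, as in core/utils.py.
        def wrapper(*args, **kwargs):
            start = time.time()
            rv = func(*args, **kwargs)
            print(f'Total: {time.time() - start:.3f} s')
            return rv
        return wrapper

    @timer
    def slow_step(n=1_000_000):   # stand-in for entrenamiento(...)
        return sum(range(n))

    slow_step()                   # prints something like: Total: 0.031 s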
diff --git a/save_dir/log.log b/save_dir/log.log
deleted file mode 100644
index c278092..0000000
--- a/save_dir/log.log
+++ /dev/null
@@ -1 +0,0 @@
-20210904-20:25:00 ----------------------------------------------------------------------------------------------------
diff --git a/train.py b/train.py
index b8411b6..0666a3b 100755
--- a/train.py
+++ b/train.py
@@ -7,7 +7,7 @@
 from torch.optim.lr_scheduler import MultiStepLR
 from config import BATCH_SIZE, PROPOSAL_NUM, SAVE_FREQ, LR, WD, resume, save_dir
 from core import model, dataset
-from core.utils import init_log, progress_bar, create_dir
+from core.utils import init_log, progress_bar, create_dir, timer

 import torchvision
 from torchvision.utils import save_image
@@ -15,6 +15,8 @@

 from ShuffleMNIST import dataset as Shuffdata

+from PIL import Image
+

 os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
 start_epoch = 1
@@ -49,8 +51,8 @@
 train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size_train, drop_last=True, shuffle=True)
 test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size_test, shuffle=True, drop_last=True)

-shuffled_train = Shuffdata.ShuffleMNIST(train_loader, anchors=[], num=4, radius=42, wall_shape=112, sum=True, is_train=True)
-shuffled_test = Shuffdata.ShuffleMNIST(test_loader, anchors=[], num=4, radius=42, wall_shape=112, sum=True, is_train=False)
+shuffled_train = Shuffdata.ShuffleMNIST(train_loader, anchors=[], num=4, radius=42, wall_shape=600, sum=True, is_train=True)
+shuffled_test = Shuffdata.ShuffleMNIST(test_loader, anchors=[], num=4, radius=42, wall_shape=600, sum=True, is_train=False)

 print('There are {} images and {} labels in the train set.'.format(len(shuffled_train.train_img),
                                                                     len(shuffled_train.train_label)))
@@ -99,83 +101,121 @@
 # avoid the scheduler warning and make sure the first learning-rate value is actually used
 # lr_scheduler.MultiStepLR()

-for epoch in range(start_epoch, 500):
-    for scheduler in schedulers:
-        scheduler.step()
+@timer
+def entrenamiento(start_epoch, _print, trainloader, testloader, net, creterion, raw_optimizer, concat_optimizer, part_optimizer, partcls_optimizer, schedulers, device):
+    for epoch in range(start_epoch, 500):
+        for scheduler in schedulers:
+            scheduler.step()

         # begin training
-    _print('--' * 50)
-    net.train()
-    for batch_idx, (data_, target_) in enumerate(trainloader):
-        # changed the batch unpacking syntax here
-        img, label = data_, target_.to(device)
-        batch_size = img.size(0)
-        raw_optimizer.zero_grad()
-        part_optimizer.zero_grad()
-        concat_optimizer.zero_grad()
-        partcls_optimizer.zero_grad()
+        _print('--' * 50)
+        net.train()
+        for batch_idx, (data_, target_) in enumerate(trainloader):
+            # changed the batch unpacking syntax here
+            img, label = data_, target_.to(device)
+            batch_size = img.size(0)
+            raw_optimizer.zero_grad()
+            part_optimizer.zero_grad()
+            concat_optimizer.zero_grad()
+            partcls_optimizer.zero_grad()

-        # transform the data
-        if len(img.shape) == 3:
-            img = np.stack([img] * 3, 2)
-        img = np.transpose(img, (0,1,2,3))
-        img = torch.as_tensor(img)
-        data_ = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
-        img = data_.to(device)
+            # transform the data
+            ##if len(img.shape) == 3:
+            ##    img = np.stack([img] * 3, 2)
+            ##img = np.transpose(img, (0,1,2,3))
+            #img = Image.fromarray(img, mode='RGB')
+            #img = Image.fromarray(img)
+            #img = transforms.Resize((600, 600), Image.BILINEAR)(img)
+            ##img = torch.as_tensor(img)
+            #img = transforms.ToTensor()(img)
+            ##data_ = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)

-        raw_logits, concat_logits, part_logits, _, top_n_prob = net(img)
-        part_loss = model.list_loss(part_logits.view(batch_size * PROPOSAL_NUM, -1),
+            if len(img.shape) == 2:
+                img = np.stack([img] * 3, 2)
+            img = img.numpy()
+            img = Image.fromarray(img, mode='RGB')
+            img = transforms.Resize((600, 600), Image.BILINEAR)(img)
+            #img = transforms.RandomCrop(INPUT_SIZE)(img)
+            img = transforms.RandomHorizontalFlip()(img)
+            img = transforms.ToTensor()(img)
+            data_ = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
+
+            img = data_.to(device)
+
+            raw_logits, concat_logits, part_logits, _, top_n_prob = net(img)
+            part_loss = model.list_loss(part_logits.view(batch_size * PROPOSAL_NUM, -1),
                                     label.unsqueeze(1).repeat(1, PROPOSAL_NUM).view(-1)).view(batch_size, PROPOSAL_NUM)
-        raw_loss = creterion(raw_logits, label)
-        concat_loss = creterion(concat_logits, label)
-        rank_loss = model.ranking_loss(top_n_prob, part_loss)
-        partcls_loss = creterion(part_logits.view(batch_size * PROPOSAL_NUM, -1),
+            raw_loss = creterion(raw_logits, label)
+            concat_loss = creterion(concat_logits, label)
+            rank_loss = model.ranking_loss(top_n_prob, part_loss)
+            partcls_loss = creterion(part_logits.view(batch_size * PROPOSAL_NUM, -1),
                                  label.unsqueeze(1).repeat(1, PROPOSAL_NUM).view(-1))
-        total_loss = raw_loss + rank_loss + concat_loss + partcls_loss
-        total_loss.backward()
-        raw_optimizer.step()
-        part_optimizer.step()
-        concat_optimizer.step()
-        partcls_optimizer.step()
-        progress_bar(i, len(trainloader), 'train')
+            total_loss = raw_loss + rank_loss + concat_loss + partcls_loss
+            total_loss.backward()
+            raw_optimizer.step()
+            part_optimizer.step()
+            concat_optimizer.step()
+            partcls_optimizer.step()
+            progress_bar(batch_idx, len(trainloader), 'train')

-    if epoch % SAVE_FREQ == 0:
-        train_loss = 0
-        train_correct = 0
-        total = 0
-        net.eval()
-        for batch_idx_t, (data_t, target_t) in enumerate(trainloader):
-            with torch.no_grad():
-                img, label = data_t, target_t.to(device)
+        if epoch % SAVE_FREQ == 0:
+            train_loss = 0
+            train_correct = 0
+            total = 0
+            net.eval()
+            for batch_idx_t, (data_t, target_t) in enumerate(trainloader):
+                with torch.no_grad():
+                    img, label = data_t, target_t.to(device)

-                # transform the data
-                img = np.array(data_t)
-                #print(img.shape)
-                if len(img.shape) == 3:
-                    img = np.stack([img] * 3, 2)
-
-                img = np.transpose(img, (0,2,1,3))
-                img = torch.as_tensor(img)
-                data_t = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
-                img = data_t.to(device)
+                    # transform the data
+                    ##if len(img.shape) == 3:
+                    ##    img = np.stack([img] * 3, 2)
+                    ##img = np.transpose(img, (0,2,1,3))
+                    #img = Image.fromarray(img, mode='RGB')
+                    #img = Image.fromarray(img)
+                    #img = transforms.Resize((600, 600), Image.BILINEAR)(img)
+                    ##img = torch.as_tensor(img)
+                    #img = transforms.ToTensor()(img)
+                    #data_t = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
+                    ##data_t = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)

-                batch_size = img.size(0)
-                _, concat_logits, _, _, _ = net(img)
+                    if len(img.shape) == 2:
+                        img = np.stack([img] * 3, 2)
+                    img = img.numpy()
+                    img = Image.fromarray(img, mode='RGB')
+                    img = transforms.Resize((600, 600), Image.BILINEAR)(img)
+                    #img = transforms.RandomCrop(INPUT_SIZE)(img)
+                    img = transforms.RandomHorizontalFlip()(img)
+                    img = transforms.ToTensor()(img)
+                    data_t = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img)
+
+                    img = data_t.to(device)
+
+                    batch_size = img.size(0)
+                    _, concat_logits, _, _, _ = net(img)
                     # calculate loss
-                concat_loss = creterion(concat_logits, label)
+                    concat_loss = creterion(concat_logits, label)
                     # calculate accuracy
-                _, concat_predict = torch.max(concat_logits, 1)
-                total += batch_size
-                train_correct += torch.sum(concat_predict.data == label.data)
-                train_loss += concat_loss.item() * batch_size
-                progress_bar(i, len(trainloader), 'eval train set')
+                    _, concat_predict = torch.max(concat_logits, 1)
+                    total += batch_size
+                    train_correct += torch.sum(concat_predict.data == label.data)
+                    train_loss += concat_loss.item() * batch_size
+                    progress_bar(batch_idx_t, len(trainloader), 'eval train set')

-        train_acc = float(train_correct) / total
-        train_loss = train_loss / total
+            train_acc = float(train_correct) / total
+            train_loss = train_loss / total

-        _print(
+            _print(
                 'epoch:{} - train loss: {:.3f} and train acc: {:.3f} total sample: {}'.format(
                     epoch,
                     train_loss,
                     train_acc,
                     total))

             # evaluate on test set
-        test_loss = 0
-        test_correct = 0
-        total = 0
-        for i, data in enumerate(testloader):
-            with torch.no_grad():
-                img, label = data[0].cuda(), data[1].cuda()
-                batch_size = img.size(0)
-                _, concat_logits, _, _, _ = net(img)
+            test_loss = 0
+            test_correct = 0
+            total = 0
+            for i, data in enumerate(testloader):
+                with torch.no_grad():
+                    img, label = data[0].cuda(), data[1].cuda()
+                    batch_size = img.size(0)
+                    _, concat_logits, _, _, _ = net(img)
                     # calculate loss
-                concat_loss = creterion(concat_logits, label)
+                    concat_loss = creterion(concat_logits, label)
                     # calculate accuracy
-                _, concat_predict = torch.max(concat_logits, 1)
-                total += batch_size
-                test_correct += torch.sum(concat_predict.data == label.data)
-                test_loss += concat_loss.item() * batch_size
-                progress_bar(i, len(testloader), 'eval test set')
+                    _, concat_predict = torch.max(concat_logits, 1)
+                    total += batch_size
+                    test_correct += torch.sum(concat_predict.data == label.data)
+                    test_loss += concat_loss.item() * batch_size
+                    progress_bar(i, len(testloader), 'eval test set')

-        test_acc = float(test_correct) / total
-        test_loss = test_loss / total
-        _print(
+            test_acc = float(test_correct) / total
+            test_loss = test_loss / total
+            _print(
                 'epoch:{} - test loss: {:.3f} and test acc: {:.3f} total sample: {}'.format(
                     epoch,
                     test_loss,
                     test_acc,
@@ -210,10 +250,10 @@
                     total))

             # save model
-        net_state_dict = net.module.state_dict()
-        if not os.path.exists(save_dir):
-            os.mkdir(save_dir)
-        torch.save({
+            net_state_dict = net.module.state_dict()
+            if not os.path.exists(save_dir):
+                os.mkdir(save_dir)
+            torch.save({
                 'epoch': epoch,
                 'train_loss': train_loss,
                 'train_acc': train_acc,
@@ -222,4 +262,6 @@
                 'net_state_dict': net_state_dict},
                 os.path.join(save_dir, '%03d.ckpt' % epoch))

+entrenamiento(start_epoch, _print, trainloader, testloader, net, creterion, raw_optimizer, concat_optimizer, part_optimizer, partcls_optimizer, schedulers, device)
+
 print('finishing training')
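Note on patch 4: the per-batch conversion in train.py (replicate the single channel,
round-trip through PIL, resize to 600x600, ToTensor, Normalize with ImageNet statistics)
mirrors the pipeline that core/dataset.py applies to CUB images. A standalone sketch of
that pipeline on a synthetic grayscale array (the array and sizes are made up for
illustration):

    import numpy as np
    from torchvision import transforms

    # Fake 112x112 single-channel image standing in for one ShuffleMNIST sample.
    arr = (np.random.rand(112, 112) * 255).astype(np.uint8)

    # Replicate the channel so the ImageNet-pretrained ResNet backbone sees 3 channels.
    rgb = np.stack([arr] * 3, axis=2)                  # (112, 112, 3) uint8

    pipeline = transforms.Compose([
        transforms.ToPILImage(),                       # numpy HxWxC uint8 -> PIL image
        transforms.Resize((600, 600)),                 # bilinear by default, as in the patch
        transforms.ToTensor(),                         # float tensor in [0, 1], shape (3, 600, 600)
        transforms.Normalize([0.485, 0.456, 0.406],    # same mean/std values as the patch
                             [0.229, 0.224, 0.225]),
    ])
    print(pipeline(rgb).shape)                         # torch.Size([3, 600, 600])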