Skip to content
Projects
Groups
Snippets
Help
This project
Loading...
Sign in / Register
Toggle navigation
人
人工智能系统实战第三期
Overview
Overview
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
yy
人工智能系统实战第三期
Commits
7a67c232
Commit
7a67c232
authored
Jan 29, 2024
by
前钰
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Upload New File
parent
4e262458
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
124 additions
and
0 deletions
+124
-0
train_GoogLeNet.py
人工智能系统实战第三期/实战代码/计算机视觉/CNN/train_GoogLeNet.py
+124
-0
No files found.
人工智能系统实战第三期/实战代码/计算机视觉/CNN/train_GoogLeNet.py
0 → 100644
View file @
7a67c232
import
os
import
os
import
sys
import
json
import
torch
import
torch.nn
as
nn
from
torchvision
import
transforms
,
datasets
import
torch.optim
as
optim
from
tqdm
import
tqdm
from
network.GoogLeNet
import
GoogLeNet
def main():
    """Train GoogLeNet (with auxiliary classifiers) on the flower dataset.

    Expects an ImageFolder layout under ./data/train and ./data/val.
    Writes the index->class mapping to class_indices.json and saves the
    weights with the best validation accuracy to ./checkpoints/googlenet/.
    """
    batch_size = 128
    num_classes = 5

    # aux_logits=True: in train mode the network returns (main, aux2, aux1) logits.
    net = GoogLeNet(num_classes=num_classes, aux_logits=True, init_weights=False)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # get data root path (abspath(join(cwd, "./")) was a no-op wrapper around getcwd)
    data_root = os.getcwd()
    image_path = os.path.join(data_root, "data")  # flower data set path
    if not os.path.exists(image_path):
        # raise instead of assert: asserts are stripped under `python -O`
        raise FileNotFoundError("{} path does not exist.".format(image_path))

    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0003)

    # shared normalization for both splits (same mean/std per channel)
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    data_transform = {
        "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     normalize]),
        "val": transforms.Compose([transforms.Resize((224, 224)),
                                   transforms.ToTensor(),
                                   normalize])}

    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = {val: key for key, val in flower_list.items()}  # invert to index -> name
    # write dict into json file so inference code can map predictions back to names
    with open('class_indices.json', 'w') as json_file:
        json.dump(cla_dict, json_file, indent=4)

    # number of workers; `or 1` guards against os.cpu_count() returning None
    nw = min([os.cpu_count() or 1, batch_size if batch_size > 1 else 0, 8])
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))

    epochs = 30
    best_acc = 0.0
    save_path = './checkpoints/googlenet/googleNet.pth'
    # torch.save does not create parent directories — make sure they exist up front
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    train_steps = len(train_loader)

    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            labels = labels.to(device)  # move once; reused by all three loss terms
            optimizer.zero_grad()
            logits, aux_logits2, aux_logits1 = net(images.to(device))
            loss0 = loss_function(logits, labels)
            loss1 = loss_function(aux_logits1, labels)
            loss2 = loss_function(aux_logits2, labels)
            # weighted sum of main + auxiliary losses.
            # NOTE(review): the GoogLeNet paper weights both aux losses by 0.3;
            # 0.2/0.4 is kept here to preserve the original training behavior.
            loss = loss0 + loss1 * 0.2 + loss2 * 0.4
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                # eval mode: model only returns the final classifier output
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        # keep only the best checkpoint by validation accuracy
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
# Run training only when executed as a script (not when imported as a module).
if __name__ == '__main__':
    main()
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment