[fix bug] add optimizer.zero_grad() in kws/utils/executor.py (#72) (#73)

* fix missing-gradient-reset bug in kws/utils/executor.py (#72)

* [fix bug] add zero_grad() above backward() in kws/utils/executor.py (#72)
This commit is contained in:
胡大炮 2022-06-05 22:39:26 +08:00 committed by GitHub
parent 41a3432198
commit 141d40704f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -46,6 +46,7 @@ class Executor:
logits = model(feats)
loss_type = args.get('criterion', 'max_pooling')
loss, acc = criterion(loss_type, logits, target, feats_lengths)
optimizer.zero_grad()
loss.backward()
grad_norm = clip_grad_norm_(model.parameters(), clip)
if torch.isfinite(grad_norm):