
Commit 72b7f22

Author: wabywang(王本友)
Commit message: embedding_not_training
1 parent ba6bf13 · commit 72b7f22

12 files changed: +21 −11 lines

models/CNNBasic.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ def __init__(self, opt ):
 
         self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
         if opt.__dict__.get("embeddings",None) is not None:
-            self.encoder.weight=nn.Parameter(opt.embeddings)
+            self.encoder.weight=nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
 
         self.content_conv = nn.Sequential(
             nn.Conv1d(in_channels = opt.embedding_dim,
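
The same one-line change repeats in every model below: the pretrained matrix opt.embeddings is wrapped in an nn.Parameter whose requires_grad is tied to the new --embedding_training flag, so the vectors are frozen by default rather than fine-tuned. A minimal sketch of the pattern, outside this repo (the Opt namespace, sizes, and random vectors are stand-ins for illustration):

import torch
import torch.nn as nn

# Stand-in for the repo's opt namespace; values here are illustrative only.
class Opt:
    vocab_size = 10000
    embedding_dim = 300
    embedding_training = False             # what --embedding_training false yields
    embeddings = torch.randn(10000, 300)   # would normally hold GloVe/w2v vectors

opt = Opt()
encoder = nn.Embedding(opt.vocab_size, opt.embedding_dim)
# The commit's pattern: requires_grad=False keeps the optimizer from
# updating the pretrained vectors during training.
encoder.weight = nn.Parameter(opt.embeddings, requires_grad=opt.embedding_training)

print(encoder.weight.requires_grad)  # False -> embeddings stay fixed

Recent PyTorch also offers nn.Embedding.from_pretrained(embeddings, freeze=True), which expresses the same intent in a single call.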

models/CNNMultiLayer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def __init__(self, opt):
1212
self.embed = nn.Embedding(opt.vocab_size + 1, opt.embedding_dim)
1313

1414
if opt.__dict__.get("embeddings",None) is not None:
15-
self.embed.weight=nn.Parameter(opt.embeddings)
15+
self.embed.weight=nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
1616

1717
self.conv1 = nn.Sequential(
1818
nn.Conv1d(opt.max_seq_len, 256, kernel_size=7, stride=1),

models/CNNText.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ def __init__(self, opt ):
 
         self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
         if opt.__dict__.get("embeddings",None) is not None:
-            self.encoder.weight=nn.Parameter(opt.embeddings)
+            self.encoder.weight=nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
 
 
         self.content_conv = nn.Sequential(

models/Capsule.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ def __init__(self,opt):
         self.kernel_size = 3
         self.kernel_size_primary=3
         if opt.__dict__.get("embeddings",None) is not None:
-            self.embed.weight=nn.Parameter(opt.embeddings)
+            self.embed.weight=nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
 
         self.primary_capsules = CapsuleLayer(num_capsules=8, num_route_nodes=-1, in_channels=256, out_channels=32)
         self.digit_capsules = CapsuleLayer(num_capsules=opt.label_size, num_route_nodes=int(32 * opt.max_seq_len/2), in_channels=8,

models/FastText.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ def __init__(self, opt ):
         self.encoder = nn.Embedding(opt.vocab_size,opt.embedding_dim)
         if opt.__dict__.get("embeddings",None) is not None:
             print('load embedding')
-            self.encoder.weight=nn.Parameter(opt.embeddings)
+            self.encoder.weight=nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
 
 
         self.content_fc = nn.Sequential(

models/LSTM.py

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
         self.lstm = nn.LSTM(opt.embedding_dim, opt.hidden_dim)
         self.hidden2label = nn.Linear(opt.hidden_dim, opt.label_size)
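
The RNN-family files keep a commented-out alternative, weight.data.copy_(...). Worth noting why the commit reassigns the parameter instead: an in-place copy fills the existing parameter and leaves its requires_grad at the default (True), so it cannot freeze the embeddings by itself, whereas a fresh nn.Parameter lets requires_grad be set in the same expression. A quick sketch of the contrast (toy sizes, not from the repo):

import torch
import torch.nn as nn

emb = nn.Embedding(100, 8)         # toy sizes, for illustration
pretrained = torch.randn(100, 8)   # stand-in for loaded vectors

# The commented-out alternative: in-place copy. The parameter object
# survives and requires_grad stays True, so vectors would still be trained.
emb.weight.data.copy_(pretrained)
print(emb.weight.requires_grad)    # True

# The commit's choice: replace the parameter and set requires_grad directly.
emb.weight = nn.Parameter(pretrained, requires_grad=False)
print(emb.weight.requires_grad)    # False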

models/LSTMBI.py

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
 
         self.lstm_layers = opt.lstm_layers

models/LSTMwithAttention.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
 
         self.num_layers = opt.lstm_layers

models/RCNN.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
 
         self.num_layers = 1

models/RNN_CNN.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
         self.lstm = nn.LSTM(opt.embedding_dim, opt.hidden_dim)
         ###self.hidden2label = nn.Linear(opt.hidden_dim, opt.label_size)

models/SelfAttention.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ def __init__(self,opt):
         self.use_gpu = torch.cuda.is_available()
 
         self.word_embeddings = nn.Embedding(opt.vocab_size, opt.embedding_dim)
-        self.word_embeddings.weight = nn.Parameter(opt.embeddings)
+        self.word_embeddings.weight = nn.Parameter(opt.embeddings,requires_grad=opt.embedding_training)
         # self.word_embeddings.weight.data.copy_(torch.from_numpy(opt.embeddings))
 
         self.num_layers = 1

opts.py

Lines changed: 10 additions & 0 deletions
@@ -30,6 +30,8 @@ def parse_opt():
                         help='max_epoch')
     parser.add_argument('--embedding_file', type=str, default="glove.6b.300",
                         help='glove or w2v')
+    parser.add_argument('--embedding_training', type=str, default="false",
+                        help='embedding_training')
     #kim CNN
     parser.add_argument('--kernel_sizes', type=str, default="1,2,3,5",
                         help='kernel_sizes')
@@ -50,6 +52,9 @@ def parse_opt():
 
 
 
+
+
+
     #
     args = parser.parse_args()
 
@@ -70,6 +75,11 @@ def parse_opt():
         args.debug = True
     else:
         args.debug = False
+
+    if args.embedding_training.lower() =="true":
+        args.embedding_training = True
+    else:
+        args.embedding_training = False
     if os.path.exists("proxy.config"):
         with open("proxy.config") as f:
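
The flag is declared as a string and converted to a bool after parsing because argparse's type=bool calls bool() on the raw text, and bool("false") is True for any non-empty string. A condensed sketch of the conversion the commit performs after parse_args (equivalent to its if/else; the example invocation is illustrative):

import argparse

# Mirrors the commit's string-flag-then-convert pattern.
parser = argparse.ArgumentParser()
parser.add_argument('--embedding_training', type=str, default="false",
                    help='embedding_training')
args = parser.parse_args(["--embedding_training", "True"])

# type=bool would not work here: bool("false") is True because the string
# is non-empty, so the raw text is compared case-insensitively instead.
args.embedding_training = args.embedding_training.lower() == "true"
print(args.embedding_training)  # True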
