neural_launch.py
#!/usr/bin/env python3
"""
Neural Chatbot Launcher

Loads a trained character-level seq2seq model and runs an interactive
chat loop with temperature sampling.
"""
import os
import pickle
import sys

import torch

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

VOCAB_SIZE = 256   # character-level: one token per byte value
EMBED_DIM = 100    # defaults; main() reads the actual sizes from the checkpoint
HIDDEN_DIM = 200
MAX_LEN = 50       # maximum encoded question length


class Vocabulary:
    """Identity mapping between the 256 byte values and token indices."""

    def __init__(self):
        self.char2idx = {chr(i): i for i in range(VOCAB_SIZE)}
        self.idx2char = {i: chr(i) for i in range(VOCAB_SIZE)}
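
# Aside: since the mapping is the identity over byte values, encoding here is
# equivalent to [ord(c) for c in s] (for code points < 256) and decoding to
# chr(); the class is kept so the pickled vocab.pkl from training loads as-is.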


class EncoderDecoder(torch.nn.Module):
    """LSTM encoder-decoder over a shared character embedding."""

    def __init__(self, vocab_size, embed_dim, hidden_dim):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        # Note: LSTM dropout only acts between stacked layers, so with the
        # default num_layers=1 it is inert (PyTorch emits a warning). Kept
        # as-is to match the architecture saved by the training script.
        self.encoder = torch.nn.LSTM(embed_dim, hidden_dim, batch_first=True, dropout=0.2)
        self.decoder = torch.nn.LSTM(embed_dim, hidden_dim, batch_first=True, dropout=0.2)
        self.fc = torch.nn.Linear(hidden_dim, vocab_size)

    def forward(self, questions, answers):
        # Encode the question; the final (hidden, cell) state seeds the decoder.
        q_embed = self.embed(questions)
        _, (hidden, cell) = self.encoder(q_embed)
        # Teacher-forced decode over the answer tokens.
        a_embed = self.embed(answers)
        decoder_out, _ = self.decoder(a_embed, (hidden, cell))
        logits = self.fc(decoder_out)
        return logits
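
# Shape sketch (illustrative, not executed by the launcher): a batch of two
# MAX_LEN-long question/answer tensors maps to per-step vocabulary logits.
#
#   m = EncoderDecoder(VOCAB_SIZE, EMBED_DIM, HIDDEN_DIM)
#   q = torch.zeros(2, MAX_LEN, dtype=torch.long)
#   a = torch.zeros(2, MAX_LEN, dtype=torch.long)
#   m(q, a).shape  # -> torch.Size([2, MAX_LEN, VOCAB_SIZE])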


def generate(model, vocab, question, max_len=40, temperature=0.7):
    """Generate a response with temperature sampling."""
    model.eval()
    # Encode the question: truncate to MAX_LEN, then right-pad with 0s.
    q_tokens = [vocab.char2idx.get(c, 0) for c in question[:MAX_LEN]]
    q_padded = q_tokens + [0] * (MAX_LEN - len(q_tokens))
    q_input = torch.tensor([q_padded], dtype=torch.long).to(DEVICE)
    with torch.no_grad():
        # Run the encoder once; its final state initializes the decoder.
        q_embed = model.embed(q_input)
        _, (hidden, cell) = model.encoder(q_embed)
        # Decode one character at a time, feeding each sample back in.
        answer = []
        current_token = torch.tensor([[0]], dtype=torch.long).to(DEVICE)
        for _ in range(max_len):
            token_embed = model.embed(current_token)
            decoder_out, (hidden, cell) = model.decoder(token_embed, (hidden, cell))
            logits = model.fc(decoder_out[0, 0])
            # Temperature sampling: T < 1 sharpens the distribution,
            # T > 1 flattens it.
            logits = logits / temperature
            probs = torch.softmax(logits, dim=0)
            next_token = torch.multinomial(probs, 1).item()
            # Stop on padding, newline, or non-ASCII tokens.
            if next_token == 0 or next_token == 10 or next_token > 127:
                break
            # Keep only printable ASCII characters.
            if 32 <= next_token < 127:
                answer.append(next_token)
            current_token = torch.tensor([[next_token]], dtype=torch.long).to(DEVICE)
    return ''.join(chr(t) for t in answer).strip()
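
# A greedy variant (illustrative sketch, same interface) would replace the
# multinomial draw above with the temperature-free
#
#   next_token = torch.argmax(logits).item()
#
# which is the T -> 0 limit of temperature sampling.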


def main():
    if not os.path.exists('neural_model.pt') or not os.path.exists('vocab.pkl'):
        print("Error: Model files not found!")
        print("Train first: python neural_train.py")
        sys.exit(1)

    print("=" * 60)
    print("Loading Neural Chatbot...")
    # weights_only=False: the checkpoint stores a plain config dict alongside
    # the state dict, so full unpickling is needed. Only load trusted files.
    checkpoint = torch.load('neural_model.pt', map_location=DEVICE, weights_only=False)
    config = checkpoint['config']
    with open('vocab.pkl', 'rb') as f:
        vocab = pickle.load(f)

    model = EncoderDecoder(
        config['vocab_size'],
        config['embed_dim'],
        config['hidden_dim'],
    ).to(DEVICE)
    model.load_state_dict(checkpoint['model'])

    print("Chatbot ready! (type 'quit' to exit)")
    print("=" * 60)
    while True:
        try:
            user_input = input("\nYou: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nGoodbye!")
            break
        if user_input.lower() in ['quit', 'exit', 'q']:
            print("Goodbye!")
            break
        if user_input:
            response = generate(model, vocab, user_input)
            print(f"Bot: {response}" if response else "Bot: ...")


if __name__ == "__main__":
    main()
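
# Example session (assumes neural_train.py has produced neural_model.pt and
# vocab.pkl in the working directory; replies will vary with sampling):
#
#   $ python neural_launch.py
#   ============================================================
#   Loading Neural Chatbot...
#   Chatbot ready! (type 'quit' to exit)
#   ============================================================
#
#   You: hello
#   Bot: ...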