OpenTransformer committed on
Commit 95f6852 · verified · 1 Parent(s): f98bd7a

Update app.py

Files changed (1)
  1. app.py +386 -364
app.py CHANGED
@@ -1,364 +1,386 @@
1
- #!/usr/bin/env python3
2
- # app.py — Chat inference for AGILLM2 (loads checkpoint from HF Hub)
3
- # - Downloads final.pt from OpenTransformer/AGILLM2-fast-training
4
- # - Qwen tokenizer with chat template
5
- # - CLI REPL and optional Gradio UI (--gradio)
6
- from __future__ import annotations
7
- import os, sys, time, math, pathlib, argparse
8
- from typing import Optional, Tuple, List, Dict, Any
9
-
10
- import torch
11
- import torch.nn as nn
12
- import torch.nn.functional as F
13
-
14
- from huggingface_hub import hf_hub_download
15
- from transformers import AutoTokenizer, logging as hf_log
16
- hf_log.set_verbosity_error()
17
-
18
- # ─────────── Config (env-overridable) ───────────
19
- MODEL_REPO = os.getenv("MODEL_REPO", "OpenTransformer/AGILLM2-fast-training")
20
- CKPT_NAME = os.getenv("CKPT_NAME", "final.pt")
21
- TOKENIZER_ID = os.getenv("TOKENIZER_ID", "Qwen/Qwen3-235B-A22B-Thinking-2507")
22
-
23
- DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
- torch.backends.cuda.matmul.allow_tf32 = True
25
- try:
26
- torch.set_float32_matmul_precision("high")
27
- except Exception:
28
- pass
29
-
30
- # ─────────── Tokenizer ───────────
31
- tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
32
- if tok.pad_token is None:
33
- tok.add_special_tokens({"pad_token": "[PAD]"})
34
- VOCAB = max(tok.get_vocab().values()) + 1
35
- BLANK = tok.pad_token_id
36
- EOS = tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
37
-
38
- # ─────────── AMP helper ───────────
39
- try:
40
- from torch.amp import autocast as _ac, GradScaler # noqa
41
- except Exception:
42
- from torch.cuda.amp import autocast as _ac, GradScaler # noqa
43
-
44
- def _supports_fp8() -> bool:
45
- return hasattr(torch, "float8_e4m3fn")
46
-
47
- def _auto_amp_dtype(prefer_fp8: bool = False):
48
- if DEV.type != "cuda":
49
- return torch.float32
50
- if prefer_fp8 and _supports_fp8():
51
- return torch.float8_e4m3fn
52
- try:
53
- if torch.cuda.is_bf16_supported():
54
- return torch.bfloat16
55
- return torch.float16
56
- except Exception:
57
- return torch.float16
58
-
59
- def amp(enabled: bool, prefer_fp8: bool = False):
60
- if not (enabled and DEV.type == "cuda"):
61
- from contextlib import nullcontext
62
- return nullcontext()
63
- return _ac(device_type="cuda", dtype=_auto_amp_dtype(prefer_fp8=prefer_fp8))
64
-
65
- # ─────────── ALiBi helpers ───────────
66
- def _alibi_slopes(n_heads: int):
67
- import math as _m
68
- def pow2slopes(n):
69
- start = 2 ** (-2 ** -(_m.log2(n) - 3))
70
- ratio = start
71
- return [start * (ratio ** i) for i in range(n)]
72
- if _m.log2(n_heads).is_integer():
73
- vals = pow2slopes(n_heads)
74
- else:
75
- closest = 2 ** _m.floor(_m.log2(n_heads))
76
- vals = pow2slopes(closest); extra = pow2slopes(2 * closest)
77
- vals += extra[0::2][: n_heads - closest]
78
- return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)
79
-
80
- def alibi_bias(n_heads: int, n_tokens: int):
81
- i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
82
- j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
83
- dist = (j - i).clamp_min(0)
84
- slopes = _alibi_slopes(n_heads)
85
- return -slopes * dist
86
-
87
- # ─────────── Model (your 5L core, AR head) ───────────
88
- class LowRankMHA(nn.Module):
89
- def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
90
- super().__init__()
91
- assert d % h == 0, "d must be divisible by number of heads"
92
- self.h, self.dk = h, d // h
93
- self.use_relpos = use_relpos
94
- self.q = nn.Linear(d, d, bias=False)
95
- self.k = nn.Linear(d, d, bias=False)
96
- self.v = nn.Linear(d, d, bias=False)
97
- self.U = nn.Parameter(torch.randn(self.dk, r))
98
- nn.init.orthogonal_(self.U)
99
- self.proj = nn.Linear(h * r, d, bias=False)
100
- self.drop = nn.Dropout(0.1)
101
-
102
- def _proj(self, x):
103
- B, N, _ = x.shape
104
- return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)
105
-
106
- def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None,
107
- rel_bias_tokens: Optional[int] = None,
108
- kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
109
- use_cache: bool = False):
110
- q = self._proj(self.q(x))
111
- k_new = self._proj(self.k(x))
112
- v_new = self._proj(self.v(x))
113
- if kv_cache is None:
114
- k, v = k_new, v_new
115
- else:
116
- k, v = kv_cache
117
- if use_cache:
118
- k = torch.cat([k, k_new], dim=2)
119
- v = torch.cat([v, v_new], dim=2)
120
- att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
121
- if q.size(2) == k.size(2):
122
- if self.use_relpos and rel_bias_tokens is not None:
123
- att = att + alibi_bias(self.h, rel_bias_tokens)
124
- if mask is not None:
125
- att = att + mask
126
- z = (att.softmax(-1) @ v).transpose(1, 2)
127
- z = z.reshape(x.size(0), x.size(1), -1)
128
- out = self.drop(self.proj(z))
129
- return (out, (k, v)) if use_cache else out
130
-
131
- class Block(nn.Module):
132
- def __init__(self, d: int, h: int, r: int):
133
- super().__init__()
134
- self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
135
- self.mha = LowRankMHA(d, h, r, use_relpos=True)
136
- self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
137
-
138
- def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor],
139
- kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
140
- use_cache: bool = False):
141
- n = x.size(1)
142
- if use_cache:
143
- y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
144
- x = x + y
145
- x = x + self.ff(self.ln2(x))
146
- return x, new_kv
147
- else:
148
- x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
149
- return x + self.ff(self.ln2(x))
150
-
151
- class Encoder(nn.Module):
152
- def __init__(self, cfg: Dict[str, int]):
153
- super().__init__()
154
- d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
155
- self.emb = nn.Embedding(VOCAB, d)
156
- self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
157
- self.ln = nn.LayerNorm(d)
158
-
159
- def forward(self, ids: torch.Tensor, mask: Optional[torch.Tensor],
160
- kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
161
- use_cache: bool = False):
162
- x = self.emb(ids)
163
- if not use_cache:
164
- for blk in self.blocks:
165
- x = blk(x, mask)
166
- return self.ln(x)
167
- new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
168
- for i, blk in enumerate(self.blocks):
169
- kv = kv_caches[i] if (kv_caches is not None) else None
170
- x, kv_out = blk(x, mask, kv, use_cache=True)
171
- new_kvs.append(kv_out)
172
- return self.ln(x), new_kvs
173
-
174
- class ARHead(nn.Module):
175
- def __init__(self, d):
176
- super().__init__()
177
- self.proj = nn.Linear(d, VOCAB)
178
- def forward(self, h): return self.proj(h)
179
-
180
- # ─────────── Misc ───────────
181
- def causal_mask(n: int):
182
- m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
183
- return torch.triu(m, 1)
184
-
185
- def _resolve_cfg_from_ckpt(sd: dict) -> Dict[str, int]:
186
- if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
187
- return dict(sd["cfg"])
188
- core = sd.get("core", {})
189
- emb_w = core.get("emb.weight")
190
- if emb_w is None:
191
- raise RuntimeError("Checkpoint missing core.emb.weight; cannot infer d/l/h/r.")
192
- d = emb_w.shape[1]
193
- layer_ids = []
194
- for k in core.keys():
195
- if k.startswith("blocks."):
196
- parts = k.split(".")
197
- if len(parts) > 2 and parts[1].isdigit():
198
- layer_ids.append(int(parts[1]))
199
- layers = (max(layer_ids) + 1) if layer_ids else 0
200
- U = core.get("blocks.0.mha.U")
201
- if U is None:
202
- raise RuntimeError("Checkpoint missing blocks.0.mha.U; cannot infer rank/heads.")
203
- dk, r = U.shape
204
- h = d // dk
205
- return {"d": d, "layers": layers, "heads": h, "rank": r}
206
-
207
- def load_joint_from_hub(repo_id: str, filename: str):
208
- ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename)
209
- sd = torch.load(ckpt_path, map_location="cpu")
210
- cfg = _resolve_cfg_from_ckpt(sd)
211
- core = Encoder(cfg).to(DEV)
212
- ar_h = ARHead(cfg["d"]).to(DEV)
213
- core.load_state_dict(sd["core"])
214
- if "ar" in sd: ar_h.load_state_dict(sd["ar"])
215
- return core, ar_h, cfg
216
-
217
- # ─────────── Chat helpers ───────────
218
- def render_chat(messages: List[Dict[str, str]], add_generation_prompt: bool = True) -> str:
219
- # messages: [{"role":"system/user/assistant","content": "..."}]
220
- return tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=add_generation_prompt)
221
-
222
- def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
223
- if n <= 0 or ids.size(1) < n - 1: return logits
224
- prefix = ids[0, -(n - 1):].tolist()
225
- banned, tokens = [], ids[0].tolist()
226
- for i in range(len(tokens) - n + 1):
227
- if tokens[i:i + n - 1] == prefix:
228
- banned.append(tokens[i + n - 1])
229
- if banned:
230
- banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
231
- logits[..., banned_idx] = float("-inf")
232
- return logits
233
-
234
- def _apply_rep_presence_frequency(logits, ids, last_n, repetition_penalty, presence_penalty, frequency_penalty):
235
- if ids.numel() == 0: return logits
236
- hist = ids[0, -last_n:].to(torch.long) if last_n > 0 else ids[0].to(torch.long)
237
- if hist.numel() == 0: return logits
238
- uniq, counts = torch.unique(hist, return_counts=True)
239
- if presence_penalty != 0.0 or frequency_penalty != 0.0:
240
- adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
241
- logits[..., uniq] = logits[..., uniq] - adjust
242
- if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
243
- sel = logits[..., uniq]
244
- sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
245
- logits[..., uniq] = sel
246
- return logits
247
-
248
- def _filter_top_k_top_p_min_p(logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float):
249
- logits = logits / max(temperature, 1e-8)
250
- if logits.dim() == 1: logits = logits.unsqueeze(0)
251
- probs = logits.softmax(-1)
252
- V = probs.size(-1)
253
- if top_k and top_k < V:
254
- vals, idx = torch.topk(probs, top_k, dim=-1)
255
- mask = torch.full_like(probs, 0.0); mask.scatter_(1, idx, 1.0); probs = probs * mask
256
- if top_p < 1.0:
257
- sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
258
- cumsum = torch.cumsum(sorted_probs, dim=-1)
259
- keep = cumsum <= top_p; keep[..., 0] = True
260
- mask = torch.zeros_like(probs); mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
261
- probs = probs * mask
262
- if min_p > 0.0:
263
- probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))
264
- sums = probs.sum(-1, keepdim=True); empty = (sums == 0)
265
- if empty.any():
266
- fallback_idx = logits.argmax(-1, keepdim=True)
267
- probs = torch.where(empty, torch.zeros_like(probs), probs)
268
- probs.scatter_(-1, fallback_idx, torch.where(empty, torch.ones_like(sums), torch.zeros_like(sums)))
269
- probs = probs / probs.sum(-1, keepdim=True)
270
- return probs
271
-
272
- @torch.no_grad()
273
- def chat_decode(core, ar_h, messages: List[Dict[str, str]], max_new: int = 200, T: float = 0.9,
274
- greedy: bool = False, top_k: int = 50, top_p: float = 0.9, min_p: float = 0.0,
275
- repetition_penalty: float = 1.1, presence_penalty: float = 0.3, frequency_penalty: float = 0.2,
276
- penalty_last_n: int = 128, no_repeat_ngram_size: int = 3,
277
- use_fp8: bool = False, fp8_fallback: bool = True) -> str:
278
- prompt = render_chat(messages, add_generation_prompt=True)
279
- ids = torch.tensor([tok.encode(prompt)], device=DEV)
280
- prompt_len = ids.size(1)
281
-
282
- with amp(use_fp8 or False, prefer_fp8=(use_fp8 and (_supports_fp8() or fp8_fallback))):
283
- h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
284
- for _ in range(max_new):
285
- logits = ar_h(h_full)[:, -1]
286
- logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
287
- logits = _apply_rep_presence_frequency(logits, ids, penalty_last_n,
288
- repetition_penalty, presence_penalty, frequency_penalty)
289
- if greedy:
290
- nxt = logits.argmax(-1, keepdim=True)
291
- else:
292
- probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
293
- nxt = probs.multinomial(1)
294
- ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim()==1 else nxt], 1)
295
- x = ids[:, -1:]
296
- h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)
297
-
298
- full_ids = ids[0].tolist()
299
- return tok.decode(full_ids[prompt_len:], skip_special_tokens=True).strip()
300
-
301
- # ─────────── CLI / Gradio ───────────
302
- def main():
303
- ap = argparse.ArgumentParser()
304
- ap.add_argument("--gradio", action="store_true", help="Launch a minimal Gradio chat UI")
305
- ap.add_argument("--fp8-only", action="store_true")
306
- ap.add_argument("--greedy", action="store_true")
307
- ap.add_argument("--top_k", type=int, default=50)
308
- ap.add_argument("--top_p", type=float, default=0.9)
309
- ap.add_argument("--temperature", type=float, default=0.9)
310
- ap.add_argument("--max_new", type=int, default=200)
311
- args = ap.parse_args()
312
-
313
- print(f"[init] downloading checkpoint {CKPT_NAME} from {MODEL_REPO} …", flush=True)
314
- core, ar_h, cfg = load_joint_from_hub(MODEL_REPO, CKPT_NAME)
315
- core.eval(); ar_h.eval()
316
- print(f"[ready] cfg={cfg} device={DEV.type} vocab={VOCAB}")
317
-
318
- if args.gradio:
319
- import gradio as gr
320
- with gr.Blocks() as demo:
321
- gr.Markdown("### OpenTransformer / AGILLM2 — Chat")
322
- chatbox = gr.Chatbot(height=480)
323
- msg = gr.Textbox(placeholder="Say something smart…")
324
- clear = gr.Button("Clear")
325
- def _chat(history, user_msg):
326
- messages = [{"role":"system","content":"You are a helpful, concise assistant."}]
327
- for u,a in history or []:
328
- messages.append({"role":"user","content":u})
329
- messages.append({"role":"assistant","content":a})
330
- messages.append({"role":"user","content":user_msg})
331
- reply = chat_decode(core, ar_h, messages, max_new=args.max_new, T=args.temperature,
332
- greedy=args.greedy, top_k=args.top_k, top_p=args.top_p,
333
- use_fp8=args.fp8_only, fp8_fallback=True)
334
- history = (history or []) + [(user_msg, reply)]
335
- return history, ""
336
- msg.submit(_chat, [chatbox, msg], [chatbox, msg], queue=False)
337
- clear.click(lambda: None, None, chatbox, queue=False)
338
- demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
339
- return
340
-
341
- # CLI REPL
342
- history: List[Tuple[str,str]] = []
343
- print("Type to chat. Ctrl+C to exit.")
344
- while True:
345
- try:
346
- user = input("\nYou: ").strip()
347
- if not user: continue
348
- messages = [{"role":"system","content":"You are a helpful, concise assistant."}]
349
- for u,a in history:
350
- messages.append({"role":"user","content":u})
351
- messages.append({"role":"assistant","content":a})
352
- messages.append({"role":"user","content":user})
353
- t0 = time.time()
354
- reply = chat_decode(core, ar_h, messages, max_new=args.max_new, T=args.temperature,
355
- greedy=args.greedy, top_k=args.top_k, top_p=args.top_p,
356
- use_fp8=args.fp8_only, fp8_fallback=True)
357
- dt = time.time()-t0
358
- print(f"Bot: {reply}\n[{len(tok.encode(reply))} tok in {dt:.2f}s]")
359
- history.append((user, reply))
360
- except KeyboardInterrupt:
361
- print("\nbye."); break
362
-
363
- if __name__ == "__main__":
364
- main()
1
+ #!/usr/bin/env python3
2
+ # app.py — Chat inference for AGILLM2 (loads checkpoint from HF Hub)
3
+ # - Downloads final.pt (or chosen file) from OpenTransformer/AGILLM2-fast-training
4
+ # - Rebuilds your 5L-style AR model from checkpoint cfg
5
+ # - Qwen chat template for multi-turn
6
+ # - Auto-Gradio on Hugging Face Spaces; optional CLI locally
7
+
8
+ from __future__ import annotations
9
+ import os, sys, time, math, pathlib, argparse
10
+ from typing import Optional, Tuple, List, Dict, Any
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+
16
+ from huggingface_hub import hf_hub_download
17
+ from transformers import AutoTokenizer, logging as hf_log
18
+ import warnings
19
+
20
+ # ─────────── Quiet logs
21
+ hf_log.set_verbosity_error()
22
+ warnings.filterwarnings("ignore", category=UserWarning)
23
+
24
+ # ─────────── Config (env-overridable)
25
+ MODEL_REPO = os.getenv("MODEL_REPO", "OpenTransformer/AGILLM2-fast-training")
26
+ CKPT_NAME = os.getenv("CKPT_NAME", "final.pt") # e.g. "step04121612.pt"
27
+ TOKENIZER_ID = os.getenv("TOKENIZER_ID", "Qwen/Qwen3-235B-A22B-Thinking-2507")
28
+
29
+ # Device + TF32 policy (new API; harmless on CPU)
30
+ DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
31
+ if DEV.type == "cuda":
32
+ try:
33
+ torch.backends.cuda.matmul.fp32_precision = "high" # alt: "ieee"
34
+ except Exception:
35
+ pass
36
+
37
+ # ─────────── Tokenizer
38
+ tok = AutoTokenizer.from_pretrained(TOKENIZER_ID, use_fast=True, trust_remote_code=True)
39
+ if tok.pad_token is None:
40
+ tok.add_special_tokens({"pad_token": "[PAD]"})
41
+ VOCAB = max(tok.get_vocab().values()) + 1
42
+ BLANK = tok.pad_token_id
43
+ EOS = tok.eos_token_id if tok.eos_token_id is not None else tok.sep_token_id
44
+
45
+ # ─────────── AMP helper
46
+ try:
47
+ from torch.amp import autocast as _ac, GradScaler # noqa
48
+ except Exception:
49
+ from torch.cuda.amp import autocast as _ac, GradScaler # noqa
50
+
51
+ def _supports_fp8() -> bool:
52
+ return hasattr(torch, "float8_e4m3fn")
53
+
54
+ def _auto_amp_dtype(prefer_fp8: bool = False):
55
+ if DEV.type != "cuda":
56
+ return torch.float32
57
+ if prefer_fp8 and _supports_fp8():
58
+ return torch.float8_e4m3fn
59
+ try:
60
+ if torch.cuda.is_bf16_supported():
61
+ return torch.bfloat16
62
+ return torch.float16
63
+ except Exception:
64
+ return torch.float16
65
+
66
+ def amp(enabled: bool, prefer_fp8: bool = False):
67
+ if not (enabled and DEV.type == "cuda"):
68
+ from contextlib import nullcontext
69
+ return nullcontext()
70
+ return _ac(device_type="cuda", dtype=_auto_amp_dtype(prefer_fp8=prefer_fp8))
71
+
72
+ # ─────────── ALiBi helpers
73
+ def _alibi_slopes(n_heads: int):
74
+ import math as _m
75
+ def pow2slopes(n):
76
+ start = 2 ** (-2 ** -(_m.log2(n) - 3))
77
+ ratio = start
78
+ return [start * (ratio ** i) for i in range(n)]
79
+ if _m.log2(n_heads).is_integer():
80
+ vals = pow2slopes(n_heads)
81
+ else:
82
+ closest = 2 ** _m.floor(_m.log2(n_heads))
83
+ vals = pow2slopes(closest)
84
+ extra = pow2slopes(2 * closest)
85
+ vals += extra[0::2][: n_heads - closest]
86
+ return torch.tensor(vals, device=DEV).view(1, n_heads, 1, 1)
87
+
88
+ def alibi_bias(n_heads: int, n_tokens: int):
89
+ i = torch.arange(n_tokens, device=DEV).view(1, 1, n_tokens, 1)
90
+ j = torch.arange(n_tokens, device=DEV).view(1, 1, 1, n_tokens)
91
+ dist = (j - i).clamp_min(0)
92
+ slopes = _alibi_slopes(n_heads)
93
+ return -slopes * dist
94
+
95
+ # ─────────── Model (5L core + AR head, matches your training)
96
+ class LowRankMHA(nn.Module):
97
+ def __init__(self, d: int, h: int, r: int, use_relpos: bool = True):
98
+ super().__init__()
99
+ assert d % h == 0, "d must be divisible by number of heads"
100
+ self.h, self.dk = h, d // h
101
+ self.use_relpos = use_relpos
102
+ self.q = nn.Linear(d, d, bias=False)
103
+ self.k = nn.Linear(d, d, bias=False)
104
+ self.v = nn.Linear(d, d, bias=False)
105
+ self.U = nn.Parameter(torch.randn(self.dk, r))
106
+ nn.init.orthogonal_(self.U)
107
+ self.proj = nn.Linear(h * r, d, bias=False)
108
+ self.drop = nn.Dropout(0.1)
109
+
110
+ def _proj(self, x):
111
+ B, N, _ = x.shape
112
+ return (x.view(B, N, self.h, self.dk).transpose(1, 2) @ self.U)
113
+
114
+ def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None,
115
+ rel_bias_tokens: Optional[int] = None,
116
+ kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
117
+ use_cache: bool = False):
118
+ q = self._proj(self.q(x))
119
+ k_new = self._proj(self.k(x))
120
+ v_new = self._proj(self.v(x))
121
+ if kv_cache is None:
122
+ k, v = k_new, v_new
123
+ else:
124
+ k, v = kv_cache
125
+ if use_cache:
126
+ k = torch.cat([k, k_new], dim=2)
127
+ v = torch.cat([v, v_new], dim=2)
128
+ att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk)
129
+ if q.size(2) == k.size(2):
130
+ if self.use_relpos and rel_bias_tokens is not None:
131
+ att = att + alibi_bias(self.h, rel_bias_tokens)
132
+ if mask is not None:
133
+ att = att + mask
134
+ z = (att.softmax(-1) @ v).transpose(1, 2)
135
+ z = z.reshape(x.size(0), x.size(1), -1)
136
+ out = self.drop(self.proj(z))
137
+ return (out, (k, v)) if use_cache else out
138
+
139
+ class Block(nn.Module):
140
+ def __init__(self, d: int, h: int, r: int):
141
+ super().__init__()
142
+ self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
143
+ self.mha = LowRankMHA(d, h, r, use_relpos=True)
144
+ self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.ReLU(), nn.Linear(4 * d, d))
145
+
146
+ def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor],
147
+ kv: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
148
+ use_cache: bool = False):
149
+ n = x.size(1)
150
+ if use_cache:
151
+ y, new_kv = self.mha(self.ln1(x), mask, rel_bias_tokens=n if mask is not None else None, kv_cache=kv, use_cache=True)
152
+ x = x + y
153
+ x = x + self.ff(self.ln2(x))
154
+ return x, new_kv
155
+ else:
156
+ x = x + self.mha(self.ln1(x), mask, rel_bias_tokens=n)
157
+ return x + self.ff(self.ln2(x))
158
+
159
+ class Encoder(nn.Module):
160
+ def __init__(self, cfg: Dict[str, int]):
161
+ super().__init__()
162
+ d, l, h, r = cfg["d"], cfg["layers"], cfg["heads"], cfg["rank"]
163
+ self.emb = nn.Embedding(VOCAB, d)
164
+ self.blocks = nn.ModuleList([Block(d, h, r) for _ in range(l)])
165
+ self.ln = nn.LayerNorm(d)
166
+
167
+ def forward(self, ids: torch.Tensor, mask: Optional[torch.Tensor],
168
+ kv_caches: Optional[List[Optional[Tuple[torch.Tensor, torch.Tensor]]]] = None,
169
+ use_cache: bool = False):
170
+ x = self.emb(ids)
171
+ if not use_cache:
172
+ for blk in self.blocks:
173
+ x = blk(x, mask)
174
+ return self.ln(x)
175
+ new_kvs: List[Tuple[torch.Tensor, torch.Tensor]] = []
176
+ for i, blk in enumerate(self.blocks):
177
+ kv = kv_caches[i] if (kv_caches is not None) else None
178
+ x, kv_out = blk(x, mask, kv, use_cache=True)
179
+ new_kvs.append(kv_out)
180
+ return self.ln(x), new_kvs
181
+
182
+ class ARHead(nn.Module):
183
+ def __init__(self, d):
184
+ super().__init__()
185
+ self.proj = nn.Linear(d, VOCAB)
186
+ def forward(self, h): return self.proj(h)
187
+
188
+ # ─────────── Misc
189
+ def causal_mask(n: int):
190
+ m = torch.full((1, 1, n, n), float("-inf"), device=DEV)
191
+ return torch.triu(m, 1)
192
+
193
+ def _resolve_cfg_from_ckpt(sd: dict) -> Dict[str, int]:
194
+ if isinstance(sd, dict) and "cfg" in sd and isinstance(sd["cfg"], dict):
195
+ return dict(sd["cfg"])
196
+ core = sd.get("core", {})
197
+ emb_w = core.get("emb.weight")
198
+ if emb_w is None:
199
+ raise RuntimeError("Checkpoint missing core.emb.weight; cannot infer d/l/h/r.")
200
+ d = emb_w.shape[1]
201
+ layer_ids = []
202
+ for k in core.keys():
203
+ if k.startswith("blocks."):
204
+ parts = k.split(".")
205
+ if len(parts) > 2 and parts[1].isdigit():
206
+ layer_ids.append(int(parts[1]))
207
+ layers = (max(layer_ids) + 1) if layer_ids else 0
208
+ U = core.get("blocks.0.mha.U")
209
+ if U is None:
210
+ raise RuntimeError("Checkpoint missing blocks.0.mha.U; cannot infer rank/heads.")
211
+ dk, r = U.shape
212
+ h = d // dk
213
+ return {"d": d, "layers": layers, "heads": h, "rank": r}
214
+
215
+ def load_joint_from_hub(repo_id: str, filename: str):
216
+ ckpt_path = hf_hub_download(repo_id=repo_id, filename=filename)
217
+ sd = torch.load(ckpt_path, map_location="cpu")
218
+ cfg = _resolve_cfg_from_ckpt(sd)
219
+ core = Encoder(cfg).to(DEV)
220
+ ar_h = ARHead(cfg["d"]).to(DEV)
221
+ core.load_state_dict(sd["core"])
222
+ if "ar" in sd: ar_h.load_state_dict(sd["ar"])
223
+ core.eval(); ar_h.eval()
224
+ return core, ar_h, cfg
225
+
226
+ # ─────────── Chat helpers
227
+ def render_chat(messages: List[Dict[str, str]], add_generation_prompt: bool = True) -> str:
228
+ # messages: [{"role":"system/user/assistant","content": "..."}]
229
+ return tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=add_generation_prompt)
230
+
231
+ def _apply_no_repeat_ngram(logits: torch.Tensor, ids: torch.Tensor, n: int):
232
+ if n <= 0 or ids.size(1) < n - 1: return logits
233
+ prefix = ids[0, -(n - 1):].tolist()
234
+ banned, tokens = [], ids[0].tolist()
235
+ for i in range(len(tokens) - n + 1):
236
+ if tokens[i:i + n - 1] == prefix:
237
+ banned.append(tokens[i + n - 1])
238
+ if banned:
239
+ banned_idx = torch.tensor(banned, device=logits.device, dtype=torch.long)
240
+ logits[..., banned_idx] = float("-inf")
241
+ return logits
242
+
243
+ def _apply_rep_presence_frequency(logits, ids, last_n, repetition_penalty, presence_penalty, frequency_penalty):
244
+ if ids.numel() == 0: return logits
245
+ hist = ids[0, -last_n:].to(torch.long) if last_n > 0 else ids[0].to(torch.long)
246
+ if hist.numel() == 0: return logits
247
+ uniq, counts = torch.unique(hist, return_counts=True)
248
+ if presence_penalty != 0.0 or frequency_penalty != 0.0:
249
+ adjust = presence_penalty + frequency_penalty * counts.to(logits.dtype)
250
+ logits[..., uniq] = logits[..., uniq] - adjust
251
+ if repetition_penalty and abs(repetition_penalty - 1.0) > 1e-6:
252
+ sel = logits[..., uniq]
253
+ sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
254
+ logits[..., uniq] = sel
255
+ return logits
256
+
257
+ def _filter_top_k_top_p_min_p(logits: torch.Tensor, top_k: int, top_p: float, min_p: float, temperature: float):
258
+ logits = logits / max(temperature, 1e-8)
259
+ if logits.dim() == 1: logits = logits.unsqueeze(0)
260
+ probs = logits.softmax(-1)
261
+ V = probs.size(-1)
262
+ if top_k and top_k < V:
263
+ _, idx = torch.topk(probs, top_k, dim=-1)
264
+ mask = torch.full_like(probs, 0.0); mask.scatter_(1, idx, 1.0); probs = probs * mask
265
+ if top_p < 1.0:
266
+ sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
267
+ cumsum = torch.cumsum(sorted_probs, dim=-1)
268
+ keep = cumsum <= top_p; keep[..., 0] = True
269
+ mask = torch.zeros_like(probs); mask.scatter_(1, sorted_idx, keep.to(mask.dtype))
270
+ probs = probs * mask
271
+ if min_p > 0.0:
272
+ probs = torch.where(probs >= min_p, probs, torch.zeros_like(probs))
273
+ sums = probs.sum(-1, keepdim=True); empty = (sums == 0)
274
+ if empty.any():
275
+ fallback_idx = logits.argmax(-1, keepdim=True)
276
+ probs = torch.where(empty, torch.zeros_like(probs), probs)
277
+ probs.scatter_(-1, fallback_idx, torch.where(empty, torch.ones_like(sums), torch.zeros_like(sums)))
278
+ probs = probs / probs.sum(-1, keepdim=True)
279
+ return probs
280
+
281
+ @torch.no_grad()
282
+ def chat_decode(core, ar_h, messages: List[Dict[str, str]], max_new: int = 200, T: float = 0.9,
283
+ greedy: bool = False, top_k: int = 50, top_p: float = 0.9, min_p: float = 0.0,
284
+ repetition_penalty: float = 1.1, presence_penalty: float = 0.3, frequency_penalty: float = 0.2,
285
+ penalty_last_n: int = 128, no_repeat_ngram_size: int = 3,
286
+ use_fp8: bool = False, fp8_fallback: bool = True) -> str:
287
+ prompt = render_chat(messages, add_generation_prompt=True)
288
+ ids = torch.tensor([tok.encode(prompt)], device=DEV)
289
+ prompt_len = ids.size(1)
290
+
291
+ with amp(use_fp8 or False, prefer_fp8=(use_fp8 and (_supports_fp8() or fp8_fallback))):
292
+ h_full, kvs = core(ids, causal_mask(ids.size(1)), use_cache=True)
293
+ for _ in range(max_new):
294
+ logits = ar_h(h_full)[:, -1]
295
+ logits = _apply_no_repeat_ngram(logits, ids, no_repeat_ngram_size)
296
+ logits = _apply_rep_presence_frequency(logits, ids, penalty_last_n,
297
+ repetition_penalty, presence_penalty, frequency_penalty)
298
+ if greedy:
299
+ nxt = logits.argmax(-1, keepdim=True)
300
+ else:
301
+ probs = _filter_top_k_top_p_min_p(logits.squeeze(0), top_k, top_p, min_p, T)
302
+ nxt = probs.multinomial(1)
303
+ ids = torch.cat([ids, nxt.unsqueeze(0) if nxt.dim()==1 else nxt], 1)
304
+ x = ids[:, -1:]
305
+ h_full, kvs = core(x, None, kv_caches=kvs, use_cache=True)
306
+
307
+ full_ids = ids[0].tolist()
308
+ return tok.decode(full_ids[prompt_len:], skip_special_tokens=True).strip()
309
+
310
+ # ─────────── Entrypoint
311
+ def main():
312
+ ap = argparse.ArgumentParser()
313
+ ap.add_argument("--gradio", action="store_true", help="Launch a minimal Gradio chat UI")
314
+ ap.add_argument("--fp8-only", action="store_true")
315
+ ap.add_argument("--greedy", action="store_true")
316
+ ap.add_argument("--top_k", type=int, default=50)
317
+ ap.add_argument("--top_p", type=float, default=0.9)
318
+ ap.add_argument("--temperature", type=float, default=0.9)
319
+ ap.add_argument("--max_new", type=int, default=200)
320
+ args = ap.parse_args()
321
+
322
+ # Force Gradio on HF Spaces (stdin is unavailable there)
323
+ if os.getenv("SPACE_ID"):
324
+ args.gradio = True
325
+
326
+ print(f"[init] downloading checkpoint {CKPT_NAME} from {MODEL_REPO} …", flush=True)
327
+ core, ar_h, cfg = load_joint_from_hub(MODEL_REPO, CKPT_NAME)
328
+ print(f"[ready] cfg={cfg} device={DEV.type} vocab={VOCAB}")
329
+
330
+ if args.gradio:
331
+ import gradio as gr
332
+ with gr.Blocks() as demo:
333
+ gr.Markdown("### OpenTransformer / AGILLM2 — Chat")
334
+ chatbox = gr.Chatbot(height=520)
335
+ with gr.Row():
336
+ msg = gr.Textbox(placeholder="Type your message…", scale=8)
337
+ send = gr.Button("Send", variant="primary", scale=1)
338
+ clear = gr.Button("Clear", scale=1)
339
+
340
+ def _chat(history, user_msg):
341
+ if not user_msg:
342
+ return history, ""
343
+ messages = [{"role":"system","content":"You are a helpful, concise assistant."}]
344
+ for u,a in history or []:
345
+ messages.append({"role":"user","content":u})
346
+ messages.append({"role":"assistant","content":a})
347
+ messages.append({"role":"user","content":user_msg})
348
+ reply = chat_decode(core, ar_h, messages, max_new=args.max_new, T=args.temperature,
349
+ greedy=args.greedy, top_k=args.top_k, top_p=args.top_p,
350
+ use_fp8=args.fp8_only, fp8_fallback=True)
351
+ history = (history or []) + [(user_msg, reply)]
352
+ return history, ""
353
+
354
+ send.click(_chat, [chatbox, msg], [chatbox, msg], queue=False)
355
+ msg.submit(_chat, [chatbox, msg], [chatbox, msg], queue=False)
356
+ clear.click(lambda: None, None, chatbox, queue=False)
357
+
358
+ demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
359
+ return
360
+
361
+ # Local-only CLI REPL
362
+ history: List[Tuple[str,str]] = []
363
+ print("Type to chat. Ctrl+C to exit.")
364
+ while True:
365
+ try:
366
+ user = input("\nYou: ").strip()
367
+ if not user:
368
+ continue
369
+ messages = [{"role":"system","content":"You are a helpful, concise assistant."}]
370
+ for u,a in history:
371
+ messages.append({"role":"user","content":u})
372
+ messages.append({"role":"assistant","content":a})
373
+ messages.append({"role":"user","content":user})
374
+ t0 = time.time()
375
+ reply = chat_decode(core, ar_h, messages, max_new=args.max_new, T=args.temperature,
376
+ greedy=args.greedy, top_k=args.top_k, top_p=args.top_p,
377
+ use_fp8=args.fp8_only, fp8_fallback=True)
378
+ dt = time.time() - t0
379
+ print(f"Bot: {reply}\n[{len(tok.encode(reply))} tok in {dt:.2f}s]")
380
+ history.append((user, reply))
381
+ except KeyboardInterrupt:
382
+ print("\nbye.")
383
+ break
384
+
385
+ if __name__ == "__main__":
386
+ main()
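A minimal usage sketch, assuming the app.py from this commit is importable from the working directory and the Hub downloads succeed; it drives load_joint_from_hub and chat_decode directly instead of going through main(), using the function names and defaults visible in the diff above:

# Usage sketch (not part of the commit): load the checkpoint and run one chat turn.
# Importing app also loads the Qwen tokenizer at module level.
import app

core, ar_h, cfg = app.load_joint_from_hub(app.MODEL_REPO, app.CKPT_NAME)
messages = [
    {"role": "system", "content": "You are a helpful, concise assistant."},
    {"role": "user", "content": "Say hello in one short sentence."},
]
reply = app.chat_decode(core, ar_h, messages, max_new=64, T=0.9, top_k=50, top_p=0.9)
print(reply)

Locally, python app.py starts the CLI REPL and python app.py --gradio launches the UI on the port given by PORT (default 7860); on Spaces, SPACE_ID forces the Gradio path automatically.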