# Files
# openharmony-mlx/tests/smoke_metal.py
#
# 17 lines
# 658 B
# Python

# smoke_metal.py — generate 8 new tokens directly through the metal backend.
#
# Smoke test: loads the model, encodes a short prompt, samples 8 tokens
# one at a time, and prints the raw ids plus the decoded text.
from gpt_oss.responses_api.inference.metal import setup_model
from openai_harmony import load_harmony_encoding, HarmonyEncodingName

enc = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)

# Load the checkpoint (change this path to your own model location).
infer_next_token = setup_model("/Volumes/long990max/project/openharmony-mlx/model.bin")

# Encode the prompt and cap the input at 128 token ids.
ids = enc.encode("你好,给我一句话的回答:(英文)")[:128]

new: list[int] = []
for step in range(8):
    # The first call of a fresh prompt should signal a new request so the
    # backend resets its KV cache; subsequent calls continue the sequence.
    # NOTE(review): original passed new_request=False unconditionally —
    # confirm against the metal backend's setup_model contract.
    tid = infer_next_token(ids + new, temperature=0.7, new_request=(step == 0))
    new.append(tid)

print("new token ids:", new)
print("decoded:", enc.decode(ids + new))