run_qwen3.py

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "Qwen/Qwen3-0.6B"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).eval()

# Use the GPU if one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

# Inference
prompt = "Hello, please introduce yourself."
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=100)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
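
# Optional: Qwen3 is an instruction-tuned chat model, so prompting through the
# tokenizer's chat template usually produces better-formed answers than feeding
# a raw string. A minimal sketch using the standard transformers
# apply_chat_template API; the enable_thinking flag is Qwen3-specific (per the
# model card) and is assumed here to disable the model's "thinking" preamble.
messages = [{"role": "user", "content": "Hello, please introduce yourself."}]
chat_text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # append the assistant turn marker
    enable_thinking=False,       # Qwen3 template option: skip the thinking block
)
chat_inputs = tokenizer(chat_text, return_tensors="pt").to(device)
chat_outputs = model.generate(**chat_inputs, max_new_tokens=100)
# Decode only the newly generated tokens, not the echoed prompt
new_tokens = chat_outputs[0][chat_inputs["input_ids"].shape[-1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))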