```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "Qwen/Qwen3-0.6B"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).eval()

# Use the GPU if one is available
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

# Inference
prompt = "Hello, please introduce yourself."
inputs = tokenizer(prompt, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=100)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```
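Note that Qwen/Qwen3-0.6B is an instruction-tuned chat model, so a raw prompt string as above may produce a plain text continuation rather than a direct answer. Wrapping the prompt with the tokenizer's chat template is the usage the Qwen3 model card recommends. The sketch below reuses the `tokenizer`, `model`, and `device` from the snippet above; the `enable_thinking` flag is a Qwen3-specific template option described in the model card.

```python
# A minimal sketch of chat-template inference, assuming the tokenizer,
# model, and device defined in the previous snippet.
messages = [{"role": "user", "content": "Hello, please introduce yourself."}]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # append the assistant-turn marker
    enable_thinking=False,       # Qwen3-specific flag: skip "thinking" mode
)
inputs = tokenizer(text, return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=100)

# Decode only the newly generated tokens, not the echoed prompt
new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))
```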