{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1609c054", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoModelForCausalLM\n", "model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\", trust_remote_code=True)\n", "print(model)" ] }, { "cell_type": "code", "execution_count": null, "id": "d8d3fa97", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoModelForCausalLM, AutoTokenizer\n", "from peft import PeftModel\n", "model_name = \"Qwen/Qwen3-0.6B\"\n", "\n", "# load the tokenizer and the model\n", "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", "base_model = AutoModelForCausalLM.from_pretrained(\n", " model_name,\n", " trust_remote_code=True,\n", " torch_dtype=\"auto\",\n", " device_map=\"auto\"\n", ")\n", "\n", "\n", "\n", "# === 加载 PEFT adapter(LoRA 权重) ===\n", "model = PeftModel.from_pretrained(base_model, r'E:\\work_yusys\\gpt_teach\\code\\qwen3_lora_output\\checkpoint-130')\n", "model.eval()\n", "print(model)" ] }, { "cell_type": "code", "execution_count": null, "id": "3bccd8f0", "metadata": {}, "outputs": [], "source": [ "\n", "def chatFunc(prompt: str):\n", "\n", " messages = [\n", " {\"role\": \"system\", \"content\": \"You are a helpful assistant. Determine whether the sentiment entered by the user is positive or negative. Note that only positive or negative cases are output.Avoid being ambiguous pleases.\"},\n", " {\"role\": \"user\", \"content\": prompt}\n", " ]\n", " text = tokenizer.apply_chat_template(\n", " messages,\n", " tokenize=False,\n", " add_generation_prompt=True,\n", " enable_thinking=True \n", " )\n", " model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n", " \n", " generated_ids = model.generate(\n", " **model_inputs,\n", " max_new_tokens=32768\n", " )\n", " output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() \n", "\n", "\n", " try:\n", " \n", " index = len(output_ids) - output_ids[::-1].index(151668)\n", " except ValueError:\n", " index = 0\n", " content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip(\"\\n\")\n", " print(\"content:\", content)\n", " return content\n", " # print(\"thinking content:\", thinking_content)\n", " " ] }, { "cell_type": "code", "execution_count": null, "id": "5b85e895", "metadata": {}, "outputs": [], "source": [ "import glob\n", "\n", "positive_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\positive\\\\*.txt', recursive=True)\n", "negative_txt_files = glob.glob('C:\\\\Users\\\\28191\\\\Desktop\\\\xuexi_py\\\\xuexi_git\\\\ai_learning\\\\data\\\\acllmdb_sentiment_small\\\\negative\\\\*.txt', recursive=True)\n", "res_list = []\n", "fail_txt_list = []\n", "count = 0.0\n", "ca = 0.0\n", "for index, file_path in enumerate(negative_txt_files, start=0):\n", " print(f\"找到文件: {file_path}\")\n", " count+=1\n", " with open(file_path, 'r', encoding='utf-8') as f:\n", " content = f.read() # 读取所有内容\n", " res = chatFunc(content)\n", " if 'negative' in res or 'Negative' in res: \n", " ca+=1\n", " else:\n", " fail_txt_list.append(res+content)\n", " res_list.append(res)\n", "\n", "for index, file_path in enumerate(positive_txt_files, start=0):\n", " print(f\"找到文件: {file_path}\")\n", " count+=1\n", " with open(file_path, 'r', encoding='utf-8') as f:\n", " content = f.read() # 读取所有内容\n", " res = chatFunc(content)\n", " if 'positive' in res or 'Positive' in res: \n", " ca+=1\n", " else:\n", " fail_txt_list.append(res+content)\n", " res_list.append(res) \n" ] }, { "cell_type": "code", "execution_count": null, "id": "a019b295", "metadata": {}, "outputs": [], "source": [ "for fail_txt in fail_txt_list:\n", " print(fail_txt) " ] }, { "cell_type": "code", "execution_count": null, "id": "c3f7cdb4", "metadata": {}, "outputs": [], "source": [ "print(\"count::>\" + str(count) + \" ac:::>\" + str(ca) + \" accuracy:::>\" + str(ca/count))" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 5 }