{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1244b8f2", "metadata": {}, "outputs": [], "source": [ "from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer\n", "from datasets import load_dataset\n", "from peft import LoraConfig, get_peft_model, TaskType\n", "import torch\n", "\n", "model_id = \"Qwen/Qwen3-0.6B\"\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n", "model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, device_map=\"auto\")\n", "\n", "# 添加 LoRA\n", "lora_config = LoraConfig(\n", " r=8,\n", " lora_alpha=32,\n", " target_modules=[\"q_proj\", \"v_proj\"], # 根据模型结构微调\n", " lora_dropout=0.1,\n", " bias=\"none\",\n", " task_type=TaskType.CAUSAL_LM\n", ")\n", "\n", "model = get_peft_model(model, lora_config)\n", "\n", "# 加载你自己的数据\n", "data = load_dataset(\"json\", data_files=\"E:\\\\work_yusys\\\\gpt_teach\\\\code\\\\data_negative.jsonl\", split=\"train\")\n", "# Tokenize\n", "def tokenize_fn(example):\n", " prompt = example[\"instruction\"]\n", " output = example[\"output\"]\n", "\n", " # 分别 tokenize prompt 和 output\n", " prompt_tokens = tokenizer(prompt, truncation=True, max_length=128)\n", " output_tokens = tokenizer(output, truncation=True, max_length=128)\n", "\n", " # 拼接 input_ids 和 attention_mask\n", " input_ids = prompt_tokens[\"input_ids\"] + output_tokens[\"input_ids\"]\n", " attention_mask = [1] * len(input_ids)\n", "\n", " # padding 到最大长度\n", " max_len = 256\n", " padding_len = max_len - len(input_ids)\n", " input_ids = input_ids + [tokenizer.pad_token_id] * padding_len\n", " attention_mask = attention_mask + [0] * padding_len\n", "\n", " # labels 对 prompt 部分设置 -100,忽略loss,只计算 output 部分\n", " labels = [-100] * len(prompt_tokens[\"input_ids\"]) + output_tokens[\"input_ids\"]\n", " labels = labels + [-100] * padding_len\n", "\n", " return {\n", " \"input_ids\": input_ids,\n", " \"attention_mask\": attention_mask,\n", " \"labels\": labels\n", " }\n", "\n", "\n", "tokenized = data.map(tokenize_fn)\n", "\n", "# 设置训练参数\n", "training_args = TrainingArguments(\n", " output_dir=\"./qwen3_lora_output\",\n", " per_device_train_batch_size=1,\n", " gradient_accumulation_steps=2,\n", " num_train_epochs=10,\n", " logging_steps=10,\n", " save_steps=50,\n", " save_total_limit=2,\n", " fp16=True,\n", " learning_rate=5e-5,\n", ")\n", "\n", "trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=tokenized,\n", ")\n", "\n", "trainer.train()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "ef24bd4d", "metadata": {}, "outputs": [], "source": [ "import torch\n", "torch.cuda.empty_cache()" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 5 }