Fine-tuning DeepSeek-R1 with Unsloth: Build Your Own Medical Expert Model
Platform: Google Colab
Install the dependencies:
%%capture
!pip install unsloth
# 或者
!pip install --no-deps "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
!pip install bitsandbytes unsloth_zoo
!pip install triton==3.2.0
Model selection
1. Choose a model that fits your use case.
2. Assess your storage, compute budget, and dataset (a quick GPU-memory check is sketched below).
3. Choose the model size and its parameters.
4. Decide between a base model and an instruct model.
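To help with step 2, the short snippet below (an optional aid, not part of the original walkthrough) reports how much GPU memory the Colab runtime actually has; the threshold used for suggesting 4-bit loading is a rough assumption, not an official rule.
import torch

# Minimal sketch: inspect the available GPU before choosing a model and precision.
if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    total_gb = props.total_memory / 1024**3
    print(f"GPU: {props.name}, total memory: {total_gb:.1f} GB")
    # Rough heuristic (an assumption): with ~16 GB or less, prefer 4-bit loading.
    print("Suggest load_in_4bit=True" if total_gb <= 16 else "Half precision may also fit")
else:
    print("No CUDA GPU detected; fine-tuning this model on CPU is not practical.")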
from unsloth import FastLanguageModel
import torch
# Maximum sequence length (context length) supported by the model
# e.g. 2048 means the model can handle up to 2048 tokens, prompt plus generated output
max_seq_length = 2048
# Data type (precision) for the model weights
# None: auto-detect the best dtype for the current GPU (recommended)
# torch.float16: for GPUs without bfloat16 support, such as Tesla T4 and V100
# torch.bfloat16: for modern GPUs with bfloat16 support (A100, H100, RTX 30/40 series); better precision and lower memory use
dtype = None
# Whether to load the model with 4-bit quantization
# True: enable 4-bit quantization, greatly reducing VRAM usage (good for GPUs with little memory)
# False: load at full precision (e.g. float16); needs more VRAM but gives slightly better quality
# Recommended: True when resources are limited, otherwise False
load_in_4bit = True
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/DeepSeek-R1-Distill-Llama-8B",  # model name or local path
    max_seq_length = max_seq_length,  # maximum context length
    dtype = dtype,                    # weight dtype; None means auto-detect
    load_in_4bit = load_in_4bit,      # load with 4-bit quantization to reduce VRAM usage
)
Prompt template:
prompt_style = """
Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.
Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate answer.
### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.
Please answer the following medical question.
### Question:
{}
### Response:
<think>{}</think>
"""
Test a question before fine-tuning:
# Test question (in Chinese, matching the zh split of the dataset): "A patient with acute appendicitis has been ill for 5 days; the abdominal pain has eased slightly but the fever persists, and a tender mass is found in the right lower abdomen on examination. How should this be managed?"
question = "一个患有急性阑尾炎的病人已经发病5天,腹痛稍有减轻但仍发热,在体检时发现右下腹有压痛的包块,此时应如何处理?"
FastLanguageModel.for_inference(model)  # switch the Unsloth model into inference mode
inputs = tokenizer([prompt_style.format(question, "")], return_tensors="pt").to("cuda")
outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs)
print(response[0].split("### Response:")[1])
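Optional: if you prefer to watch the answer stream token by token instead of waiting for batch_decode, transformers' TextStreamer can be passed to generate. This is a small convenience sketch, not part of the original notebook.
from transformers import TextStreamer

# Stream tokens to stdout as they are generated instead of decoding at the end.
streamer = TextStreamer(tokenizer, skip_prompt=True)
_ = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,
    use_cache=True,
    streamer=streamer,
)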
Prepare the dataset
We use the following medical reasoning dataset:
https://hf-mirror.com/datasets/FreedomIntelligence/medical-o1-reasoning-SFT
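The link above points at the hf-mirror.com mirror. If huggingface.co is not directly reachable from your environment (an assumption about your network, not a required step), you can point the Hugging Face libraries at the mirror before loading the data:
import os

# Route huggingface_hub / datasets traffic through the mirror; set before calling load_dataset.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"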
Train the selected model
Training prompt template:
train_prompt_style = """
Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.
Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate answer.
### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning,
diagnostics, and treatment planning.
Please answer the following medical question.
### Question:
{}
### Response:
<think>
{}
</think>
{}
"""
Data preprocessing
It is essential to append an EOS (end-of-sequence) token to every training example; otherwise the model may keep generating without stopping.
EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN
def formatting_prompts_func(examples):
    # `examples` is a batch of rows (batched=True in dataset.map below)
    inputs = examples["Question"]
    cots = examples["Complex_CoT"]
    outputs = examples["Response"]
    texts = []
    for input_, cot, output in zip(inputs, cots, outputs):
        # Fill the training template and append EOS so the model learns where to stop
        text = train_prompt_style.format(input_, cot, output) + EOS_TOKEN
        texts.append(text)
    return {
        "text": texts,
    }
Download the dataset from the Hugging Face Hub:
from datasets import load_dataset
dataset = load_dataset("FreedomIntelligence/medical-o1-reasoning-SFT", 'zh', split="train[0:500]", trust_remote_code=True)
print(dataset.column_names)
dataset = dataset.map(formatting_prompts_func, batched=True)
dataset["text"][0]模型训练
Model training
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,  # LoRA rank; common values are 8, 16, 32, 64, 128 (larger = more capacity, more VRAM)
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],  # modules to apply LoRA to
    lora_alpha = 16,  # LoRA alpha, usually scaled with r
    lora_dropout = 0,  # dropout on the LoRA layers; 0 is fastest
    bias = "none",  # how bias parameters are handled; "none" uses the least resources
    use_gradient_checkpointing = "unsloth",  # Unsloth's gradient checkpointing; cuts VRAM usage by 30% or more and allows larger batches
    random_state = 3407,  # random seed for reproducibility
    use_rslora = False,  # whether to use rank-stabilized LoRA (rsLoRA)
    loftq_config = None,  # LoftQ quantization-aware initialization (requires quantization)
)
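At this point it can be useful to confirm how few parameters LoRA actually trains. The PEFT-wrapped model returned above typically exposes print_trainable_parameters(); this is a quick check under that assumption, not a required step.
# Report how many parameters are trainable versus the total parameter count.
model.print_trainable_parameters()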
from trl import SFTTrainer
from transformers import TrainingArguments
from unsloth import is_bfloat16_supported
# Instruction fine-tuning with SFTTrainer (the supervised fine-tuning trainer)
trainer = SFTTrainer(
    model = model,  # the model to fine-tune (already wrapped with LoRA)
    tokenizer = tokenizer,  # the matching tokenizer
    train_dataset = dataset,  # training data as a Hugging Face Dataset object
    dataset_text_field = "text",  # name of the column containing the formatted text
    max_seq_length = max_seq_length,  # maximum sequence length, same as the model setting
    dataset_num_proc = 2,  # number of processes for dataset preprocessing
    packing = False,  # pack multiple short samples into one sequence (up to 5x faster for short sequences; keep False for long or uneven samples)
    args = TrainingArguments(
        per_device_train_batch_size = 2,  # training batch size per GPU
        gradient_accumulation_steps = 4,  # gradient accumulation; effective batch size is 4x larger
        warmup_steps = 5,  # learning-rate warmup steps to stabilize early training
        max_steps = 60,  # total training steps (overrides num_train_epochs if set)
        # num_train_epochs = 1,  # number of epochs (use instead of max_steps for longer runs)
        learning_rate = 2e-4,  # learning rate; 2e-4 or 5e-4 are common for LoRA
        fp16 = not is_bfloat16_supported(),  # fall back to float16 if the GPU lacks bfloat16
        bf16 = is_bfloat16_supported(),  # prefer bfloat16 on GPUs that support it (A100, H100, RTX 30/40 series)
        logging_steps = 1,  # log every step
        optim = "adamw_8bit",  # 8-bit AdamW optimizer to save VRAM
        weight_decay = 0.01,  # weight decay to reduce overfitting
        lr_scheduler_type = "linear",  # linear decay after warmup
        seed = 3407,  # random seed for reproducibility
        output_dir = "outputs",  # directory for checkpoints and outputs
        report_to = "none",  # no external trackers such as WandB or TensorBoard
    )
)
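With per_device_train_batch_size = 2 and gradient_accumulation_steps = 4, the effective batch size is 8. To actually run the 60 training steps configured above, start the trainer:
# Run supervised fine-tuning for the configured 60 steps.
trainer_stats = trainer.train()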
Inference after fine-tuning
Let's run the same question again and see how the answer differs.
print(question)
FastLanguageModel.for_inference(model)  # switch back to inference mode after training
inputs = tokenizer([prompt_style.format(question, "")], return_tensors="pt").to("cuda")
outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs)
print(response[0].split("### Response:")[1])