Fix: bert_score cannot load a local model

Many of us work on intranets that cannot reach huggingface, and pulling models through ModelScope over a flaky connection fails easily. bert_score, however, only accepts a model name: it then either downloads the model from huggingface automatically or loads it from its own local cache, and that cache is not the same as the official huggingface cache.

Solution 1: modify the bert_score source. Although bert_score only accepts a model name, internally it loads the model through AutoTokenizer.from_pretrained and AutoModel.from_pretrained, two methods most readers already know well. So all you need to do is add your own model_path in the source and replace the model_type argument with model_path where the model is actually loaded.

Original source:

# Excerpt from bert_score's utils.py; the imports it relies on
# (AutoModel, AutoTokenizer, version, trans_version) are omitted here.
def get_model(model_type, num_layers, all_layers=None):
    if model_type.startswith("scibert"):
        model = AutoModel.from_pretrained(cache_scibert(model_type))
    elif "t5" in model_type:
        from transformers import T5EncoderModel

        model = T5EncoderModel.from_pretrained(model_type)
    else:
        model = AutoModel.from_pretrained(model_type)
    model.eval()
    # ... rest of the function (layer selection and return) omitted ...

def get_tokenizer(model_type, use_fast=False):
    if model_type.startswith("scibert"):
        model_type = cache_scibert(model_type)

    if version.parse(trans_version) >= version.parse("4.0.0"):
        tokenizer = AutoTokenizer.from_pretrained(model_type, use_fast=use_fast)
    else:
        assert not use_fast, "Fast tokenizer is not available for version < 4.0.0"
        tokenizer = AutoTokenizer.from_pretrained(model_type)

    return tokenizer

Change it to:

def get_model(model_type, num_layers, all_layers=None):
    model_path = 'xxx'  # replace 'xxx' with the path to your local model directory
    if model_type.startswith("scibert"):
        model = AutoModel.from_pretrained(cache_scibert(model_type))
    elif "t5" in model_type:
        from transformers import T5EncoderModel

        model = T5EncoderModel.from_pretrained(model_path)
    else:
        model = AutoModel.from_pretrained(model_path)
    model.eval()
    # ... rest of the function unchanged ...


def get_tokenizer(model_type, use_fast=False):
    if model_type.startswith("scibert"):
        model_type = cache_scibert(model_type)

    model_path = 'xxx'  # replace 'xxx' with the path to your local model directory
    if version.parse(trans_version) >= version.parse("4.0.0"):
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=use_fast)
    else:
        assert not use_fast, "Fast tokenizer is not available for version < 4.0.0"
        tokenizer = AutoTokenizer.from_pretrained(model_path)

    return tokenizer
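
With either function patched, bert_score is called exactly as before. Below is a minimal sketch of the call, under the assumption that the files at your model_path are bert-base-uncased-compatible; model_type is still passed because score() uses it to pick defaults such as num_layers, but after the patch it is ignored for loading:

from bert_score import score

cands = ["The cat sat on the mat."]
refs = ["A cat was sitting on the mat."]

# model_type still selects defaults (e.g. num_layers); with the patch above,
# the actual weights are read from your hard-coded model_path.
P, R, F1 = score(cands, refs, model_type="bert-base-uncased")
print(f"F1: {F1.mean().item():.4f}")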

Solution 2: let bert_score find the model in its own cache. Sharp-eyed readers will already have spotted in the code above how bert_score loads cached models: the name passed as model_type must start with the scibert- prefix, and the local model files must sit in a specific directory. As you can see, the function that downloads these models follows its own naming rule, so you need to rename your model folder accordingly (a small sketch of the rule follows the source excerpt below).

Part of the relevant source is shown below:

def cache_scibert(model_type, cache_folder="~/.cache/torch/transformers"):
    if not model_type.startswith("scibert"):
        return model_type

    underscore_model_type = model_type.replace("-", "_")
    cache_folder = os.path.abspath(os.path.expanduser(cache_folder))
    filename = os.path.join(cache_folder, underscore_model_type)
    # ... download logic omitted: if `filename` already exists the download
    # is skipped and the path is returned as-is ...
    return filename
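
Concretely, the naming rule is: hyphens in the model name become underscores, and the resulting folder is looked up under ~/.cache/torch/transformers. A small sketch (the model name here is only an example) that prints where your files must be placed:

import os

model_type = "scibert-scivocab-uncased"  # example name; must start with "scibert"
cache_folder = os.path.abspath(os.path.expanduser("~/.cache/torch/transformers"))
target_dir = os.path.join(cache_folder, model_type.replace("-", "_"))
print(target_dir)

# Put your model files (config.json, pytorch_model.bin, vocab.txt, ...) in
# target_dir; because the folder then exists, cache_scibert skips the
# download and returns this path directly.

Note that if the name you pass is not one of bert_score's known models, you will likely also have to pass num_layers explicitly to score(), since the default layer count is looked up by model name.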
