The difference between package_dir={"": "src"} and packages=find_packages("src") in setup()

package_dir={"": "src"} 是用来告诉 setuptools 在哪个目录下查找包的。具体来说,它的作用是指定包的根目录,以便 setuptools 知道从哪里开始查找包。

结合 packages=find_packages("src") 的用法,package_dir={"": "src"} 的作用是将包目录 src 作为根目录来查找包。这样,当你在 find_packages 中指定 src 时,setuptools 就会知道这个目录是你的包的根目录,而不是默认的根目录(即项目的根目录)。

In short:

  • package_dir={"": "src"}: 指定了包的根目录。即告诉 setuptools 查找包的目录是 src
  • find_packages("src"): 在 src 目录下查找所有的包。

Using the two parameters together lets you keep source code separate from the rest of the project: all source code goes under src, while other files (configuration, documentation, etc.) stay in the project root. The benefits include:

  1. Organization: separating source code from other files keeps the project structure clear.
  2. Avoiding name clashes: prevents conflicts between the source tree and files that may exist in the project root.

This "src layout" is a common practice, especially in larger projects.
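
To see the split in responsibilities directly, a quick check (assuming the hypothetical layout above, run from the project root) shows that find_packages only reports names:

```python
from setuptools import find_packages

print(find_packages("src"))  # ['mypkg'] — package names only, not paths
print(find_packages())       # [] — nothing is found in the project root
```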
