Common ROS Templates

Contents

I. C++ Image: Image Enhancement

1.CMakeLists.txt

2.package.xml

3. launch file

4.cpp

II. C++ Point Cloud: Point Cloud Projection

1.CMakeLists.txt

2.package.xml

3. launch

4. cpp

III. Python: YOLOv5 Neural Network

IV. Python: Image Dehazing

V. Python: Receiving Point Clouds


I. C++ Image: Image Enhancement

1.CMakeLists.txt

cmake_minimum_required(VERSION 2.8.3)
project(image_enhance)

add_compile_options(-std=c++11)
find_package(catkin REQUIRED COMPONENTS roscpp rospy cv_bridge image_transport sensor_msgs std_msgs)
find_package(OpenCV REQUIRED)

catkin_package(
#  INCLUDE_DIRS include
#  LIBRARIES my_image_transport
#  CATKIN_DEPENDS cv_bridge image_transport sensor_msgs
#  DEPENDS system_lib
)

include_directories(  ${catkin_INCLUDE_DIRS}  ${OpenCV_INCLUDE_DIRS} )


add_executable(${PROJECT_NAME}_node src/${PROJECT_NAME}.cpp)
add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})

2.package.xml

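The manifest itself was not shown in the original post; a minimal package.xml consistent with the components listed in the CMakeLists.txt above might look like the following (package name taken from the project, maintainer/version/license are placeholders):

<?xml version="1.0"?>
<package format="2">
  <name>image_enhance</name>
  <version>0.0.0</version>
  <description>The image_enhance package</description>
  <maintainer email="todo@todo.todo">todo</maintainer>
  <license>TODO</license>

  <buildtool_depend>catkin</buildtool_depend>
  <depend>roscpp</depend>
  <depend>rospy</depend>
  <depend>cv_bridge</depend>
  <depend>image_transport</depend>
  <depend>sensor_msgs</depend>
  <depend>std_msgs</depend>

  <export>
  </export>
</package>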

3. launch file

<!-- -->

<launch>
	<node pkg="image_enhance" type="image_enhance_node" name="image_enhance_node" output="screen">
		<param name ="image_topic" value="/pub_t"/>
		<param name ="frame_rate" value="30"/>
		<param name ="mode" value="2"/>
	</node>

</launch>

4.cpp

#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>          // conversion between ROS images and OpenCV images
#include <sensor_msgs/Image.h>            // image message type
#include <sensor_msgs/image_encodings.h>  // encoding constants such as BGR8
#include <sensor_msgs/CameraInfo.h>       // camera/sensor info
#include <cmath>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#define _NODE_NAME_ "image_enhancement"   // node name
// OpenCV functions live in the cv namespace; "using namespace cv" avoids prefixing every call with cv::.
using namespace cv;
using namespace std;
class ImageEnhancement  // node class
{
private:  // member variables
	ros::Publisher enhance_image_pub_;   // publisher for the enhanced image
	ros::Subscriber image_sub_;          // subscriber for the input image
	std::string image_topic_;            // input image topic name
	bool is_show_result_;
	bool image_ok_, image_enhanced_;
	int frame_rate_;
	int mode_;
	// 0: enhancement by histogram equalization
	// 1: enhancement by logarithmic (log) transform
	// 2: enhancement by gamma transform
	// 3: hybrid enhancement
	cv_bridge::CvImagePtr cv_ptr_;
	cv_bridge::CvImagePtr cv_ptr2;
	ros::Timer timer_;

public:  // member functions
	bool init();
	void loadimage(const sensor_msgs::ImageConstPtr& msg);
	void enhancepub0(const ros::TimerEvent&);
	void enhancepub1(const ros::TimerEvent&);
	void enhancepub2(const ros::TimerEvent&);
	void enhancepub3(const ros::TimerEvent&);
};

int main(int argc, char** argv)
{
	ros::init(argc, argv, _NODE_NAME_);
	ImageEnhancement enhance;  // create the node object
	enhance.init();            // set up parameters, publisher, subscriber and timer
	ros::spin();               // enter the ROS event loop
	return 0;
}



bool ImageEnhancement::init()  // member function definition
{
	ros::NodeHandle nh, nh_private("~");  // public and private node handles
	// parameters that can be set from the launch file (parameter server)
	nh_private.param<std::string>("image_topic", image_topic_, "");
	nh_private.param<int>("frame_rate", frame_rate_, 30);
	nh_private.param<int>("mode", mode_, 0);

	image_ok_ = false;        // becomes true once the first image has been received
	image_enhanced_ = false;
	enhance_image_pub_ = nh.advertise<sensor_msgs::Image>("/image_enhancement", 1);  // publisher

	// subscribe to image_topic_; since the callback is a class member, `this` is passed as well
	image_sub_ = nh.subscribe(image_topic_, 1, &ImageEnhancement::loadimage, this);
	
	if(mode_ == 0)
		timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub0, this);
	else if(mode_ == 1)
		timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub1, this);
	else if(mode_ == 2)
		timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub2, this);
	else
		ROS_ERROR("no valid enhancement mode selected!");  // error log
	ROS_INFO("image_enhancement initial ok.");             // info log
	return true;
}

void ImageEnhancement::loadimage(const sensor_msgs::ImageConstPtr& msg)
{
	ROS_INFO("[%s]: getting image!",_NODE_NAME_);
	cv_bridge::CvImagePtr cv;                                           // cv_bridge image pointer
	cv = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);  // convert and store the ROS image
	cv_ptr_ = cv;
	
	image_ok_ = true;
	image_enhanced_ = false;
}

void ImageEnhancement::enhancepub0(const ros::TimerEvent&)
{
	if (image_ok_ == false)
	{
		ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
		return;
	}
	else if (image_enhanced_ == true)
	{
		ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
		return;
	}
	else
		ROS_INFO("[%s]: image enhancement start! mode:0",_NODE_NAME_);
		
	static int width, height;
	width = cv_ptr_->image.cols;
	height = cv_ptr_->image.rows;
	// create the output image (cv::Mat: rows, columns, pixel format)
	cv::Mat enhanced_image(height, width, CV_8UC3);
	enhanced_image.setTo(0);  // initialize the enhanced image
	cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));

	// equalize the histogram of each channel separately
	cv::Mat imageRGB[3];
	split(enhanced_image, imageRGB);
	for (int i=0; i<3; ++i)
	{
		equalizeHist(imageRGB[i], imageRGB[i]);
	}
	merge(imageRGB, 3, enhanced_image);
	// build and publish the output message
	sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", enhanced_image).toImageMsg();
	imageMsg->header.frame_id = std::string("enhance image");
	imageMsg->header.stamp = ros::Time::now();
	enhance_image_pub_.publish(imageMsg);
	ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
	image_enhanced_ = true;
}

void ImageEnhancement::enhancepub1(const ros::TimerEvent&)
{
	if (image_ok_ == false)
	{
		ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
		return;
	}
	else if (image_enhanced_ == true)
	{
		ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
		return;
	}
	else
		ROS_INFO("[%s]: image enhancement start! mode:1",_NODE_NAME_);
		
	static int width, height;
	width = cv_ptr_->image.cols;
	height = cv_ptr_->image.rows;
	cv::Mat enhanced_image(height, width, CV_8UC3);
	enhanced_image.setTo(0);
	cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
	
	// log transform: s = log(1 + r), applied per channel
	cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
	for (int i=0; i<enhanced_image.rows; ++i)
	{
		for (int j=0; j<enhanced_image.cols; ++j)
		{
			srcLog.at<Vec3f>(i, j)[0] = log(1 + enhanced_image.at<Vec3b>(i, j)[0]);
			srcLog.at<Vec3f>(i, j)[1] = log(1 + enhanced_image.at<Vec3b>(i, j)[1]);
			srcLog.at<Vec3f>(i, j)[2] = log(1 + enhanced_image.at<Vec3b>(i, j)[2]);
		}
	}
	// normalize to [0, 255] and convert back to 8-bit
	normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
	convertScaleAbs(srcLog, srcLog);
	
	sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", srcLog).toImageMsg();
	imageMsg->header.frame_id = std::string("enhance image");
	imageMsg->header.stamp = ros::Time::now();
	enhance_image_pub_.publish(imageMsg);
	ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
	image_enhanced_ = true;
}

void ImageEnhancement::enhancepub2(const ros::TimerEvent&)
{
	if (image_ok_ == false)
	{
		ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
		return;
	}
	else if (image_enhanced_ == true)
	{
		ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
		return;
	}
	else
		ROS_INFO("[%s]: image enhancement start! mode:2",_NODE_NAME_);
		
	static int width, height;
	width = cv_ptr_->image.cols;
	height = cv_ptr_->image.rows;
	cv::Mat enhanced_image(height, width, CV_8UC3);
	enhanced_image.setTo(0);
	cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
	
	// gamma transform: lookup table of s = r^2 for all 256 gray levels
	float pixels[256];
	for (int i=0; i<256; ++i)
	{
		pixels[i] = powf(i, 2);
	}

	cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
	for (int i=0; i<enhanced_image.rows; ++i)
	{
		for (int j=0; j<enhanced_image.cols; ++j)
		{
			srcLog.at<Vec3f>(i, j)[0] = pixels[enhanced_image.at<Vec3b>(i, j)[0]];
			srcLog.at<Vec3f>(i, j)[1] = pixels[enhanced_image.at<Vec3b>(i, j)[1]];
			srcLog.at<Vec3f>(i, j)[2] = pixels[enhanced_image.at<Vec3b>(i, j)[2]];
		}
	}
	normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
	//Mat img;
	//resize(srcLog, img, Size(640, 360), 0, 0, INTER_CUBIC);
	convertScaleAbs(srcLog, srcLog, 4, 30);                        // scale and offset: dst = |4*src + 30|
	resize(srcLog, srcLog, Size(1920, 1440), 0, 0, INTER_LINEAR);
	medianBlur(srcLog, srcLog, 3);

	sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", srcLog).toImageMsg();
	imageMsg->header.frame_id = std::string("enhance image");
	imageMsg->header.stamp = ros::Time::now();
	enhance_image_pub_.publish(imageMsg);
	image_enhanced_ = true;
	ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
}
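
The class declaration above also lists enhancepub3 for mode 3 (hybrid enhancement), but the original template stops at mode 2: enhancepub3 is never defined and init() never registers it. A minimal sketch is given below, under the assumption that "hybrid" simply chains per-channel histogram equalization with a gamma transform; to use it, init() would also need an else if (mode_ == 3) branch that creates the corresponding timer.

// Assumed implementation of mode 3 (hybrid enhancement): histogram equalization followed by a gamma transform.
void ImageEnhancement::enhancepub3(const ros::TimerEvent&)
{
	if (image_ok_ == false)
	{
		ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
		return;
	}
	else if (image_enhanced_ == true)
	{
		ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
		return;
	}
	ROS_INFO("[%s]: image enhancement start! mode:3",_NODE_NAME_);

	cv::Mat enhanced_image = cv_ptr_->image.clone();

	// step 1: per-channel histogram equalization (as in mode 0)
	cv::Mat imageRGB[3];
	split(enhanced_image, imageRGB);
	for (int i=0; i<3; ++i)
		equalizeHist(imageRGB[i], imageRGB[i]);
	merge(imageRGB, 3, enhanced_image);

	// step 2: gamma transform s = r^2 (as in mode 2), then normalize back to 8-bit
	cv::Mat srcGamma;
	enhanced_image.convertTo(srcGamma, CV_32FC3);
	cv::pow(srcGamma, 2.0, srcGamma);
	normalize(srcGamma, srcGamma, 0, 255, NORM_MINMAX);
	convertScaleAbs(srcGamma, srcGamma);

	sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", srcGamma).toImageMsg();
	imageMsg->header.frame_id = std::string("enhance image");
	imageMsg->header.stamp = ros::Time::now();
	enhance_image_pub_.publish(imageMsg);
	image_enhanced_ = true;
	ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
}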

II. C++ Point Cloud: Point Cloud Projection

1.CMakeLists.txt

cmake_minimum_required(VERSION 2.8.3)
project(kittivelo_cam)
add_compile_options(-std=c++11)
set(CMAKE_BUILD_TYPE Release)

find_package(catkin REQUIRED COMPONENTS
    cv_bridge
    image_transport
    message_filters
    roscpp
    rospy
    std_msgs
    tf
    pcl_conversions
    tf2
    tf2_ros
    tf2_geometry_msgs
)

catkin_package(
#  INCLUDE_DIRS include
#  LIBRARIES colored_pointcloud
#  CATKIN_DEPENDS cv_bridge image_transport message_filters roscpp rospy std_msgs
#  DEPENDS system_lib
)

include_directories(
  include
  ${catkin_INCLUDE_DIRS}
)

# opencv
find_package(OpenCV 3 REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})

# pcl
find_package(PCL 1.7 REQUIRED)
include_directories(${PCL_INCLUDE_DIRS})
add_definitions(${PCL_DEFINITIONS})

# boost
find_package(Boost REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})

## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context



add_executable(kittivelo_cam src/kittivelo_cam.cpp)
## Specify libraries to link a library or executable target against
target_link_libraries(kittivelo_cam ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${Boost_LIBRARIES})

2.package.xml

<?xml version="1.0"?>
<package format="2">
  <name>kittivelo_cam</name>
  <version>0.0.0</version>
  <description>The colored_pointcloud package</description>

  <!-- One maintainer tag required, multiple allowed, one person per tag -->
  <!-- Example:  -->
  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
  <maintainer email="imrs@todo.todo">imrs</maintainer>


  <!-- One license tag required, multiple allowed, one license per tag -->
  <!-- Commonly used license strings: -->
  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
  <license>TODO</license>


  <!-- Url tags are optional, but multiple are allowed, one per tag -->
  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/colored_pointcloud</url> -->


  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->


  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!--   <depend>roscpp</depend> -->
  <!--   Note that this is equivalent to the following: -->
  <!--   <build_depend>roscpp</build_depend> -->
  <!--   <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!--   <build_depend>message_generation</build_depend> -->
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!--   <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!--   <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <!--   <exec_depend>message_runtime</exec_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!--   <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!--   <doc_depend>doxygen</doc_depend> -->
  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>cv_bridge</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>message_filters</build_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>rospy</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_export_depend>cv_bridge</build_export_depend>
  <build_export_depend>image_transport</build_export_depend>
  <build_export_depend>message_filters</build_export_depend>
  <build_export_depend>roscpp</build_export_depend>
  <build_export_depend>rospy</build_export_depend>
  <build_export_depend>std_msgs</build_export_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>image_transport</exec_depend>
  <exec_depend>message_filters</exec_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>rospy</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <!-- packages required by the CMakeLists.txt above but missing from the original manifest -->
  <depend>tf</depend>
  <depend>tf2</depend>
  <depend>tf2_ros</depend>
  <depend>tf2_geometry_msgs</depend>
  <depend>pcl_conversions</depend>


  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->

  </export>
</package>

3. launch

<!-- -->
<launch>
  <node pkg="kittivelo_cam" type="kittivelo_cam" name="kittivelo_cam" output="screen">
  </node>
</launch>

4. cpp

//
// Created by cai on 2021/8/26.
//

#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <image_transport/image_transport.h>
#include <time.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
#include <stdio.h>
//#include "fssim_common/Cmd.h"
#include <Eigen/Core>
#include <Eigen/Dense>                 // dense matrix algebra (inverse, eigenvalues, etc.)
#include "sensor_msgs/LaserScan.h"
#include "geometry_msgs/PoseWithCovarianceStamped.h"
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/time_synchronizer.h>
#include <boost/thread/thread.hpp>
#include <iostream>
#include <nav_msgs/Path.h>
#include <sensor_msgs/Imu.h>
#include <sensor_msgs/NavSatFix.h>

#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud_conversion.h>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/filter.h>        // pcl::removeNaNFromPointCloud
#include <pcl/filters/voxel_grid.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/common/transforms.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/visualization/cloud_viewer.h>
#include <tf2_ros/transform_listener.h>

#include "colored_pointcloud/colored_pointcloud.h"

using namespace Eigen;
using namespace cv;
using namespace std;
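
The header colored_pointcloud/colored_pointcloud.h is not included in the post. It is assumed to define the custom PCL point type PointXYZRGBI used below (x, y, z, intensity plus packed RGB); a minimal sketch of such a header, following PCL's custom-point-type registration pattern, could look like this:

// colored_pointcloud.h -- assumed contents (minimal sketch)
#ifndef COLORED_POINTCLOUD_H
#define COLORED_POINTCLOUD_H

#include <cstdint>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/register_point_struct.h>

struct PointXYZRGBI
{
  PCL_ADD_POINT4D;                        // float x, y, z plus padding for SSE alignment
  union
  {
    struct { std::uint8_t b, g, r, a; };  // packed color, same layout as pcl::PointXYZRGB
    float rgb;
  };
  float intensity;
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;

POINT_CLOUD_REGISTER_POINT_STRUCT(PointXYZRGBI,
    (float, x, x)(float, y, y)(float, z, z)
    (float, rgb, rgb)(float, intensity, intensity))

#endif  // COLORED_POINTCLOUD_H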

cv::Mat P_rect_00(3,4,cv::DataType<double>::type);  // 3x4 projection matrix after rectification
cv::Mat R_rect_00(4,4,cv::DataType<double>::type);  // 3x3 rectifying rotation, expanded to 4x4 homogeneous form
cv::Mat RT(4,4,cv::DataType<double>::type);         // 4x4 extrinsic transform (rotation and translation) from LiDAR to camera
class RsCamFusion
{
//**********************************************************************************************************
  // 1. Member variables
  private:
    typedef     message_filters::sync_policies::ApproximateTime<sensor_msgs::Image,sensor_msgs::PointCloud2> slamSyncPolicy;
    message_filters::Synchronizer<slamSyncPolicy>* sync_;
    message_filters::Subscriber<sensor_msgs::Image>* camera_sub1; 
    message_filters::Subscriber<sensor_msgs::PointCloud2>* lidar_sub;
    
    pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud_toshow;
    pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud;
    pcl::PointCloud<PointXYZRGBI>::Ptr cloud_toshow;
    /*
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud_toshow;
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud;
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_toshow;
     */
    pcl::PointCloud<pcl::PointXYZI>::Ptr input_cloud;
    pcl::PointCloud<pcl::PointXYZI>::Ptr input_cloud_ptr;
    pcl::PointCloud<pcl::PointXYZI>::Ptr raw_cloud;
    cv::Mat input_image;
    cv::Mat image_to_show,image_to_show1;
    int frame_count = 0;
    static cv::Size imageSize;
    static ros::Publisher pub;
    //store calibration data in Opencv matrices

    image_transport::Publisher depth_pub ;
    sensor_msgs::ImagePtr depth_msg;
    ros::NodeHandle nh;
    ros::Publisher colored_cloud_showpub;
    ros::Subscriber sub;
    ros::Publisher fused_image_pub1;
	
  public:   
    // constructor
    RsCamFusion():
    nh("~")
    {
        // KITTI calibration, hard-coded: RT (Tr_velo_to_cam extrinsics), R_rect_00 (rectifying rotation) and P_rect_00 (rectified projection matrix)
        RT.at<double>(0,0) = 7.533745e-03;RT.at<double>(0,1) = -9.999714e-01;RT.at<double>(0,2) = -6.166020e-04;RT.at<double>(0,3) = -4.069766e-03;
        RT.at<double>(1,0) = 1.480249e-02;RT.at<double>(1,1) = 7.280733e-04;RT.at<double>(1,2) = -9.998902e-01;RT.at<double>(1,3) = -7.631618e-02;
        RT.at<double>(2,0) = 9.998621e-01;RT.at<double>(2,1) = 7.523790e-03;RT.at<double>(2,2) = 1.480755e-02;RT.at<double>(2,3) = -2.717806e-01;
        RT.at<double>(3,0) = 0.0;RT.at<double>(3,1) = 0.0;RT.at<double>(3,2) = 0.0;RT.at<double>(3,3) = 1.0;

        R_rect_00.at<double>(0,0) = 9.999239e-01;R_rect_00.at<double>(0,1) = 9.837760e-03;R_rect_00.at<double>(0,2) = -7.445048e-03;R_rect_00.at<double>(0,3) = 0.0;
        R_rect_00.at<double>(1,0) = -9.869795e-03;R_rect_00.at<double>(1,1) = 9.999421e-01;R_rect_00.at<double>(1,2) = -4.278459e-03;R_rect_00.at<double>(1,3) = 0.0;
        R_rect_00.at<double>(2,0) = 7.402527e-03;R_rect_00.at<double>(2,1) = 4.351614e-03;R_rect_00.at<double>(2,2) = 9.999631e-01;R_rect_00.at<double>(2,3) = 0.0;
        R_rect_00.at<double>(3,0) = 0.0;R_rect_00.at<double>(3,1) = 0.0;R_rect_00.at<double>(3,2) = 0.0;R_rect_00.at<double>(3,3) = 1.0;

        P_rect_00.at<double>(0,0) = 7.215377e+02;P_rect_00.at<double>(0,1) = 0.000000e+00;P_rect_00.at<double>(0,2) = 6.095593e+02;P_rect_00.at<double>(0,3) = 0.000000e+00;
        P_rect_00.at<double>(1,0) = 0.000000e+00;P_rect_00.at<double>(1,1) = 7.215377e+02;P_rect_00.at<double>(1,2) = 1.728540e+02;P_rect_00.at<double>(1,3) = 0.000000e+00;
        P_rect_00.at<double>(2,0) = 0.000000e+00;P_rect_00.at<double>(2,1) = 0.000000e+00;P_rect_00.at<double>(2,2) = 1.000000e+00;P_rect_00.at<double>(2,3) = 0.000000e+00;
        // approximate-time synchronization of the camera image and the LiDAR point cloud
        camera_sub1  = new message_filters::Subscriber<sensor_msgs::Image>(nh, "/forward",300);
        lidar_sub  = new message_filters::Subscriber<sensor_msgs::PointCloud2>(nh, "/kitti/velo/pointcloud",100);
        sync_ = new  message_filters::Synchronizer<slamSyncPolicy>(slamSyncPolicy(100), *camera_sub1,*lidar_sub);
        sync_->registerCallback(boost::bind(&RsCamFusion::callback,this, _1, _2));

        // advertise the output topics once here rather than re-advertising inside every callback
        colored_cloud_showpub = nh.advertise<sensor_msgs::PointCloud2>("colored_cloud_toshow",10);
        fused_image_pub1 = nh.advertise<sensor_msgs::Image>("image_to_show",10);

        cout<<"waiting for image"<<endl;
        allocateMemory();  // initialize the point cloud pointers
    }

    void allocateMemory()
    {
        raw_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
        colored_cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
        colored_cloud.reset(new pcl::PointCloud<PointXYZRGBI>());
        cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
        input_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
        input_cloud_ptr.reset(new pcl::PointCloud<pcl::PointXYZI>());
        
    }
    void resetParameters(){ 
        raw_cloud->clear();
        input_cloud_ptr->clear();
        input_cloud->clear();
        colored_cloud_toshow->clear();
        colored_cloud->clear();
        cloud_toshow->clear();
    }
    void callback(const sensor_msgs::ImageConstPtr input_image_msg1,
            const sensor_msgs::PointCloud2ConstPtr input_cloud_msg)
    {
        resetParameters();
        cv::Mat input_image1;
        cv_bridge::CvImagePtr cv_ptr1; 
        
        std_msgs::Header image_header1 = input_image_msg1->header;
        std_msgs::Header cloud_header = input_cloud_msg->header;

        // convert the ROS image message to an OpenCV image
        cv_ptr1 = cv_bridge::toCvCopy(input_image_msg1,sensor_msgs::image_encodings::BGR8);
        input_image1 = cv_ptr1->image;
        
        // convert the ROS point cloud message to a PCL cloud and drop invalid (NaN) points
        pcl::fromROSMsg(*input_cloud_msg, *input_cloud_ptr);
        std::vector<int> indices; 
        pcl::removeNaNFromPointCloud(*input_cloud_ptr, *input_cloud_ptr, indices);

        // project the points into the image and color them
        transformpoint(input_cloud_ptr,input_image1,P_rect_00,R_rect_00,RT);
        cout<<"start"<<endl;
        publishCloudtoShow(colored_cloud_showpub, cloud_header, colored_cloud_toshow);
        publishImage(fused_image_pub1, image_header1, image_to_show1);
        frame_count = frame_count + 1;
    }
         
         //××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
    void transformpoint(const pcl::PointCloud<pcl::PointXYZI>::ConstPtr& input_cloud, const cv::Mat input_image, cv::Mat &P_rect_00,cv::Mat &R_rect_00,cv::Mat &RT)
    {

        cv::Mat X(4,1,cv::DataType<double>::type);
        cv::Mat Y(4,1,cv::DataType<double>::type);
        cv::Point pt;
        std::vector<cv::Point3f> rawPoints;
        
        *raw_cloud = *input_cloud;
        image_to_show = input_image.clone();
        for(int i=0;i<raw_cloud->size();i++) {

            
            // convert each 3D point into homogeneous coordinates and store it in a 4D variable X
            X.at<double>(0, 0) = raw_cloud->points[i].x;
            X.at<double>(1, 0) = raw_cloud->points[i].y;
            X.at<double>(2, 0) = raw_cloud->points[i].z;
            X.at<double>(3, 0) = 1;

            // apply the projection equation to map X onto the image plane of the camera:
            // Y = P_rect_00 * R_rect_00 * RT * X
            Y=P_rect_00*R_rect_00*RT*X;
            // transform Y back into Euclidean (pixel) coordinates and store the result in pt
            pt.x=Y.at<double>(0, 0) / Y.at<double>(2, 0);
            pt.y=Y.at<double>(1, 0) / Y.at<double>(2, 0);
            float d = Y.at<double>(2, 0)*1000.0;  // scaled depth along the camera axis (used to keep only points in front of the camera)
            float val = raw_cloud->points[i].x;
            float maxVal = 20.0;
            int red = min(255, (int) (255 * abs((val - maxVal) / maxVal)));
            int green = min(255, (int) (255 * (1 - abs((val - maxVal) / maxVal))));
            
            if(pt.x<1240 &&pt.x>0 &&pt.y<375 &&pt.y>0 &&d>0)
            {
                /*
                if (int(input_image.at<cv::Vec3b>(pt.y, pt.x)[2]) == 128 || int(input_image.at<cv::Vec3b>(pt.y, pt.x)[2]) == 152
                  ||int(input_image.at<cv::Vec3b>(pt.y, pt.x)[2]) == 107 || int(input_image.at<cv::Vec3b>(pt.y, pt.x)[2]) == 244
                  ||int(input_image.at<cv::Vec3b>(pt.y, pt.x)[2]) == 70 )
                  {
                  */
                  
                  
                    cv::circle(image_to_show, pt, 1, cv::Scalar(0, green, red), cv::FILLED);
                    image_to_show1 = image_to_show;
                    PointXYZRGBI point;
                    point.x = raw_cloud->points[i].x;   
                    point.y = raw_cloud->points[i].y;   //to create colored point clouds
                    point.z = raw_cloud->points[i].z;
                    point.intensity = raw_cloud->points[i].intensity;
                    point.g = input_image.at<cv::Vec3b>(pt.y, pt.x)[1];
                    point.b = input_image.at<cv::Vec3b>(pt.y, pt.x)[0];
                    point.r = input_image.at<cv::Vec3b>(pt.y, pt.x)[2];
                    colored_cloud->points.push_back(point); 
                    
                  

            }
            /*
            else
            {
                pcl::PointXYZRGB point;
                point.x = raw_cloud->points[i].x;   
                point.y = raw_cloud->points[i].y;   //to create colored point clouds
                point.z = raw_cloud->points[i].z;
                //point.intensity = raw_cloud->points[i].intensity;
                point.g = 0;
                point.b = 0;
                point.r = 0;
                cloud_toshow->points.push_back(point); 
            }
            */
            
          }
          *colored_cloud_toshow=*colored_cloud+*cloud_toshow;
    }

    void publishCloudtoShow(const ros::Publisher& cloudtoshow_pub, const std_msgs::Header& header,
                  const pcl::PointCloud<PointXYZRGBI>::ConstPtr& cloud)
    {
        sensor_msgs::PointCloud2 output_msg;
        pcl::toROSMsg(*cloud, output_msg);
        output_msg.header = header;
        cloudtoshow_pub.publish(output_msg);
    }
    void publishImage(const ros::Publisher& image_pub, const std_msgs::Header& header, const cv::Mat image)
    {
      cv_bridge::CvImage output_image;
      output_image.header = header;
      output_image.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
      output_image.image = image;
      image_pub.publish(output_image.toImageMsg());  // convert to sensor_msgs::Image before publishing
    } 
};
//*****************************************************************************************************
//
//                                           Program entry point
//
//*****************************************************************************************************
int main(int argc, char** argv)
{
    // 1. Initialize the node and create the fusion object
    ros::init(argc, argv, "kitti3D2");
    RsCamFusion RF;
    ros::spin();
    return 0;
}

III. Python: YOLOv5 Neural Network

#!/usr/bin/env python
# YOLOv5 
