2023年7月25日发(作者:)
关于海思HI3559AV100及SDK的⼀些使⽤经验[TOC]概述我们绝对不会从0开始,所以,如果你想从0开始使⽤海思平----⽆论是做视频/⾳频采集还是做⼈⼯智能项⽬----,那得考虑⼀下你是否能得到海思的技术⽀持你是否熟练掌握各种规范你是否有⾜够多的时间去阅读⼿册你是否⾜够聪明去猜测海思开发⼈员的思路如果你的答案都是否,那么还是放弃吧。因为使⽤海思平台,你需要从头⾄尾了解所有的硬件,对于linux,你反倒不⽤太在乎;正所谓海思不在乎。海思从来不在乎⼩⽣意,FAE据说都在⼤客户那⾥,这种有钱就是爹的尿性正是太令⼈喜欢了。相反,nvidia跟你隔着N个时区,你在论坛上提的问题都有⼈详细跟踪,你只需要注册⼀个账号就⾏了。Video如果是从各路IMX传感器来,那么请使⽤海思提供的sample,如果是采集YUV视频,那么请继续往下读。海思提供了不少的样例,每⼀个样例的友好度⽆限接近于0。看着海思⾃以为聪明的命名⽅式,⾃以为聪明的代码排版,⾃以为聪明的结构设计,你⽆从下⼿。所以从现在开始,忘记你在使⽤linux,调整到⾯向海思SDK编程模式。MIPI-Rx如果你采⽤2-lane的⽅式,那么我劝你放弃。原因如下:不稳定,总是莫名其妙的错误,⼿动调整DESCREW也没⽤⼏乎不可能靠⾃⼰的⼒量解决问题,然⽽⼩公司不可能得到技术⽀持,各路代理商属于你问100个问题,他只回答⼀个的那种,⽽且他经常回答“我们也没⽤过”、“我们也不知道”、“你可以试试”、“没有”。海思提供的寄存器不⾜以让你找到问题,⽐如:MIPI(N)_PKT_INTR_ST、MIPI(N)_PKT_INTR2_ST、MIPI(N)_FRAME_INTR_ST会告诉你发⽣了错误,但你难以进⼀步跟踪,⽐如此时你想捕获⼀个包头看看,⼀个包头正好是4字节,海思没有提供寄存器,所幸,可以读PHY_PH_MIPI_LINK(P),因为你只有2-lane,所以你只能遗憾的得到头2个字节,总之如果你的CSI不稳定,你很难从硬件上找到问题并解决。⽽如果选⽤4-lane,那么你正好可以从PHY_PH_MIPI_LINK(P)读出包头。为了调试2-lane,我废了很多⼯夫,海思提供的himm读写不好⽤,我这⾥提供⼀个还不错的。//// (c) 2020 chiv//#include "utils/flags.h"#include #include #include #include #include #include #include qDefineFlag(uint32, phy, 0, "phy link, [0..3]");qDefineFlag(uint32, chn, 0, "mipi rx chn, [0..7]");qDefineFlag(bool, wr, true, "read");qDefineFlag(bool, dump, true, "dump status register only");qDefineFlag(uint32, addr, 0, "register offset");qDefineFlag(uint32, value, 0, "if write mode was enabled, this is the value to write");qDefineFlag(bool, relative, true, "use offset");uint32_t regaddr(uint32_t pa_offset, uint32_t addr, uint32_t reg_addr){ return addr - pa_offset + reg_addr;}void dump_mipi_rx_regs(int mem_fd, int phy, int chn){ size_t len = 0x10000; uint32_t offset = 0x11a40000; uint32_t pa_offset = offset & ~(sysconf(_SC_PAGE_SIZE) - 1); printf("mipi rx, pa_offset = %08xn", pa_offset); uint32_t* base = (uint32_t*)mmap(NULL, len + offset - pa_offset, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, pa_offset); if (base) { uint32_t pkt_intr_st = 0x1060 + 0x1000 * chn; uint32_t pkt_intr_st2 = 0x1070 + 0x1000 * chn; uint32_t frm_intr_st = 0x1080 + 0x1000 * chn; printf("PKT_INTR_ST :033[31m%08x033[0mn", *(base + regaddr(pa_offset, offset, pkt_intr_st) / 4)); printf("PKT_INTR_ST2:033[31m%08x033[0mn", *(base + regaddr(pa_offset, offset, pkt_intr_st2) / 4)); printf("FRM_INTR_ST :033[31m%08x033[0mn", *(base + regaddr(pa_offset, offset, frm_intr_st) / 4)); // munmap((void*)base, len + offset - pa_offset); }}void process_read(int mem_fd, uint32_t addr){ if (qFlag(relative)) { addr += 0x11a40000; } size_t len = 4; uint32_t offset = addr; uint32_t pa_offset = offset & ~(sysconf(_SC_PAGE_SIZE) - 1); printf("pa_offset = %08xn", pa_offset); uint32_t* base = (uint32_t*)mmap(NULL, len + offset - pa_offset, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, pa_offset); if (base) { printf("Reg:%08x, Value:%08xn", addr, *(base + regaddr(pa_offset, addr, 0) / 4)); // munmap((void*)base, len + offset - pa_offset); }}void process_write(int mem_fd, uint32_t addr, uint32_t value){ if (qFlag(relative)) { addr += 0x11a40000; } size_t len = 4; uint32_t offset = addr; uint32_t pa_offset = offset & ~(sysconf(_SC_PAGE_SIZE) - 1); printf("pa_offset = %08xn", pa_offset); uint32_t* base = (uint32_t*)mmap(NULL, len + offset - pa_offset, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, pa_offset); if (base) { *(base + regaddr(pa_offset, offset, 0) / 4) = value; // munmap((void*)base, len + offset - pa_offset); }}int main(int argc, char** argv){ // usage std::string usage = std::string() + "n" + argv[0] + "--dump=1, dump mipi_rx status registersn" + argv[0] + "--dump=0 --wr=1 --addr=0x1060, read the mipi_rx registern" + argv[0] + "--dump=0 --wr=1 --addr=0x11a41060 --relative=false, read any registern" + argv[0] + "--dump=0 --wr=1 --addr=0x11a41060 --relative=false, read any registern" + argv[0] + "--dump=0 --wr=0 --addr=0x1064 --value=0x10081008, write the mipi_rx registern" + argv[0] + "--dump=0 --wr=0 --addr=0x10601064 --value=0 --relative=false, write any registern"; //printf("%sn", usage.c_str()); google::SetUsageMessage(usage); qInitializeFlags(&argc, &argv);
int mem_fd = open("/dev/mem", O_RDWR, 0666); if (mem_fd < 0) { printf("can't open device:/dev/memn"); return -1; } if (qFlag(dump)) { dump_mipi_rx_regs(mem_fd, qFlag(phy), qFlag(chn)); } else { if (qFlag(wr)) { // read mode process_read(mem_fd, qFlag(addr)); } else { process_write(mem_fd, qFlag(addr), qFlag(value)); } } // close(mem_fd); return 0;}为了使⽤时⽅便,通常可以放⼊脚本中执⾏#!/bin/sh# read mipi registerreg_addr=0x20if [ $1 != "" ]; then reg_addr=$1 echo read $reg_addrfi./dump_mipi_rx_regs --dump=0 --wr=1 --addr=$reg_addr#!/bin/sh# write value to mipi registerif [ $# != 2 ]; then echo "Usage:$0 " exitfi./dump_mipi_rx_regs --dump=0 --wr=0 --addr=$1 --value=$2例如,读取link0寄存器PHY_PH_MIPI_LINK(P)./mipir 0x1c程序中内置了mipi-rx的寄存器基地址,默认情况下输⼊都被当做相对地址,使⽤--relative=0可启⽤绝对地址。最常⽤的地址就是0x1c[PHY_PH_MIPI_LINK(P)],可以从中读出MIPI的包头,即Type-ID,Word-Count,ECC。不带参数执⾏程序,可以直接输出MIPI_Rx的⼏个状态寄存器,⽆错误时都为0。如果这些都没有错误,那么cat /proc/umap/hi_mipi可以获得可阅读的更多信息,否则还是拿上你的数据⼿册打印寄存器的值看看吧。YUV我们只关注YUV,也即sensor输出的数据为YUV格式。海思提供有限⽀持,在⽂档《HiMPP V4.0 媒体处理软件 FAQ》的3.3.4节有介绍,这也是唯⼀的配置⽅法,我也是从这⾥抄的。海思的各个硬件之间可以互相倒腾数据,这个⼯作可以由SDK完成。使⽤YUV时要旁路ISP,那么此时我们可以粗略的得到⼀个⼯作流程图。graph TB id1[MIPI_Rx]-->id2(VI_CAP) subgraph Video input #0 id2==>id3[PIPE0, YUV422] id2-.->id4[PIPE2-4] id3==>id5[Channel #0, YUV420 SP] id5-.->id6[Ext channel#0-8] id5==>id7(Encoder) end subgraph Video Encoder #0 id7==>id8[Channel #0, H265] id8==>id9[Query frame] end subgraph live555 id9==>id10[Send to network] end对于HI3559AV100⽽⾔,Video和Mipi的对应关系不需要设置,天然对应;PIPE可动态绑定,每个PIPE有⾄少1个物理通道和⼏个扩展通道,它们可以完成⼀些诸如裁切、缩放、旋转之类的⼯作。理清了视频的流向,那么程序也就有了思路,不要看海思的样例,按照我们⾃⼰的思路⾛。第⼀个YUV采集程序硬件连接4-lane不像2-lane⼀样,在设计原理图时不容易迷惑。HI3559AV100有16个lane,4个link,4-lane的连接⽅式可以接⼊4路设备。graph LR id1["Sensor0"]==>id2["Lane0~3"] id3["Sensor1"]==>id4["Lane4~7"] id5["Sensor2"]==>id6["Lane8~Lane11"] id7["Sensor3"]==>id9["Lane12~Lane15"]海思SDK中的资源描述海思的SDK中把路视频输抽象为VI(Video input),并给它们分配不同的设备号,HI3559AV100最多可接⼊8路视频,因此VI的设备号范围为0~7。当采⽤2-lane的⽅式时,可接⼊8路Sensor,VI设备号对应0~7,但是当采⽤4-lane的连接时,设备号只能由4个,但范围并不是0~3,⽽是0,2,4,6。对于MIPI,海思SDK也采⽤了类似的抽象⽅式,其设备号范围为对应Vi的设备号。开始编写程序⾸先,使⽤海思的SDK就不要想着多进程了。其次,使⽤相应的功能之前需要初始化。第⼀步,建⽴硬件描述硬件的连接已经⾮常清楚了,我们专门建⽴模块来描述它们。//// (c) 2020 chiv//#pragma once#include "mpi_vi.h"#include "hi_mipi.h"// all definitions of Hisilicon modulesnamespace lowlevel {namespace desc {//---------------------------------------------------------------------------------------// Audio//---------------------------------------------------------------------------------------//---------------------------------------------------------------------------------------// Video//---------------------------------------------------------------------------------------// VI_DEV_ATTR_S// VI_PIPE_ATTR_S// VI_PIPE_CHN_ATTR_Sstruct ViAttrs { VI_DEV_ATTR_S DevAttr; VI_PIPE_ATTR_S PipeAttr; VI_CHN_ATTR_S ChnAttr;};ViAttrs MakeMipiYuvAttrs_YUV422_8bits_UYVY(int width, int height);//// 4 input signals connected to mipi rxcombo_dev_attr_t MakeMipiAttrs_Input_0(int width, int height);combo_dev_attr_t MakeMipiAttrs_Input_1(int width, int height);combo_dev_attr_t MakeMipiAttrs_Input_2(int width, int height);combo_dev_attr_t MakeMipiAttrs_Input_3(int width, int height);//static const int kNumberOfInputs = 4;//combo_dev_t MakeComboDev(int idx);VI_DEV MakeViDev(int idx);VI_PIPE MakeViPipe(int idx);//static inline combo_dev_attr_t MakeMipiAttrs(int idx, int cx, int cy){ switch (idx) { case 0: return MakeMipiAttrs_Input_0(cx, cy); case 1: return MakeMipiAttrs_Input_1(cx, cy); case 2: return MakeMipiAttrs_Input_2(cx, cy); case 3: return MakeMipiAttrs_Input_3(cx, cy); default:printf("033[31mMipiDevNo[%d] is not valid033[0m", MakeComboDev(idx)); abort();break; }}}}#include "HardwareDesc.h"namespace lowlevel {//---------------------------------------------------------------------------------------// Audio//---------------------------------------------------------------------------------------//---------------------------------------------------------------------------------------// Video//---------------------------------------------------------------------------------------static combo_dev_attr_t MIPI_YUV422_ATTR_BASE = { .devno = 0, .input_mode = INPUT_MODE_MIPI, .data_rate = MIPI_DATA_RATE_X1, .img_rect = { 0, 0, 1920, 1080 }, { .mipi_attr = { { .mipi_attr = { DATA_TYPE_YUV422_8BIT, HI_MIPI_WDR_MODE_NONE, { -1, -1, -1, -1, -1, -1, -1, -1 } } }};static VI_DEV_ATTR_S DEV_MIPI_YUV422_ATTR_BASE = { VI_MODE_MIPI_YUV422, VI_WORK_MODE_1Multiplex, { 0xFF000000, 0x00FF0000 }, VI_SCAN_PROGRESSIVE, { -1, -1, -1, -1 }, VI_DATA_SEQ_UVUV, { VI_VSYNC_PULSE, VI_VSYNC_NEG_LOW, VI_HSYNC_VALID_SINGNAL, VI_HSYNC_NEG_HIGH, VI_VSYNC_VALID_SINGAL, VI_VSYNC_VALID_NEG_HIGH, { 0, 1920, 0, 0, 1080, 0, 0, 0, 0 } }, VI_DATA_TYPE_YUV, HI_FALSE, { 1920, 1080 }, { { { 1920, 1080 }, }, { VI_REPHASE_MODE_NONE, VI_REPHASE_MODE_NONE } }, { WDR_MODE_NONE, 1080 }, DATA_RATE_X1};static VI_PIPE_ATTR_S PIPE_YUV422_ATTR_BASE = { VI_PIPE_BYPASS_NONE, HI_FALSE, HI_TRUE, 1920, 1080, PIXEL_FORMAT_YVU_SEMIPLANAR_422, COMPRESS_MODE_NONE, DATA_BITWIDTH_8, HI_FALSE, { PIXEL_FORMAT_YVU_SEMIPLANAR_422, DATA_BITWIDTH_8, VI_NR_REF_FROM_RFR, COMPRESS_MODE_NONE }, HI_FALSE, { -1, -1 }, HI_FALSE};static VI_CHN_ATTR_S CHN_YUV422_ATTR_BASE { { 1920, 1080 }, PIXEL_FORMAT_YVU_SEMIPLANAR_420, // CHN ---> VENC, VENC need YVU420 semiplanar DYNAMIC_RANGE_SDR8, VIDEO_FORMAT_LINEAR, COMPRESS_MODE_NONE, HI_FALSE, HI_FALSE, 0, { -1, -1 }};namespace desc { // combo_dev_t MakeComboDev(int idx) { return idx * 2; } VI_DEV MakeViDev(int idx) { return idx * 2; } VI_PIPE MakeViPipe(int idx) { return idx * 2; } // YUV 422 8bit ViAttrs MakeMipiYuvAttrs_YUV422_8bits_UYVY(int width, int height) { ViAttrs attrs = { ViAttrs attrs = { .DevAttr = DEV_MIPI_YUV422_ATTR_BASE, .PipeAttr = PIPE_YUV422_ATTR_BASE, .ChnAttr = CHN_YUV422_ATTR_BASE }; // No need for YUV ngBlank = VI_TIMING_BLANK_S { 0, (HI_U32)width, 0, 0, (HI_U32)height, 0, 0, 0, 0 }; ize = SIZE_S { (HI_U32)width, (HI_U32)height }; = SIZE_S { (HI_U32)width, (HI_U32)height }; ttr.u32CacheLine = height; tr.u32MaxW = width; tr.u32MaxH = height; = SIZE_S { (HI_U32)width, (HI_U32)height }; return attrs; } combo_dev_attr_t MakeMipiAttrs_Input_0(int width, int height) { combo_dev_attr_t attr = MIPI_YUV422_ATTR_BASE; = 0; _rect = img_rect_t { 0, 0, (HI_U32)width, (HI_U32)height }; __id[0] = 0; __id[1] = 1; __id[2] = 2; __id[3] = 3; __id[4] = -1; __id[5] = -1; __id[6] = -1; __id[7] = -1; return attr; } combo_dev_attr_t MakeMipiAttrs_Input_1(int width, int height) { combo_dev_attr_t attr = MIPI_YUV422_ATTR_BASE; = 2; _rect = img_rect_t { 0, 0, (HI_U32)width, (HI_U32)height }; __id[0] = 4; __id[1] = 5; __id[2] = 6; __id[3] = 7; __id[4] = -1; __id[5] = -1; __id[6] = -1; __id[7] = -1; return attr; } combo_dev_attr_t MakeMipiAttrs_Input_2(int width, int height) { combo_dev_attr_t attr = MIPI_YUV422_ATTR_BASE; = 4; _rect = img_rect_t { 0, 0, (HI_U32)width, (HI_U32)height }; __id[0] = 8; __id[1] = 9; __id[2] = 10; __id[3] = 11; __id[4] = -1; __id[5] = -1; __id[6] = -1; __id[7] = -1; return attr; } combo_dev_attr_t MakeMipiAttrs_Input_3(int width, int height) { combo_dev_attr_t attr = MIPI_YUV422_ATTR_BASE; = 6; _rect = img_rect_t { 0, 0, (HI_U32)width, (HI_U32)height }; __id[0] = 12; __id[1] = 13; __id[2] = 14; __id[3] = 15; __id[4] = -1; __id[5] = -1; __id[6] = -1; __id[7] = -1; return attr; }} // namespace desc} // namespace lowlevel这部分代码的⽬的是⽤⼀个连续的数字代替视频输⼊,例如:0代表第⼀路输⼊,1代表第⼆路输⼊,等等,硬件如何实现不重要,重要的是要把硬件的输⼊与idx对应起来,故⽽对⽤户⽽⾔,只存在video0、video1这样的设备,不存在理解上的偏差。第⼆步,初始化系统使⽤视频采集需要VB(Video buffer)模块,Sys是必须的,⽆⽤赘⾔。// 初始化Video bufferstatic bool _InitVb(){ struct { int Width; int Height; PIXEL_FORMAT_E PixFmt; DATA_BITWIDTH_E BitWidth; COMPRESS_MODE_E CompressMode; HI_U32 Align; } fmts[] = { { 1920, 1080, PIXEL_FORMAT_YVU_SEMIPLANAR_422, DATA_BITWIDTH_8, COMPRESS_MODE_NONE, DEFAULT_ALIGN }, { 1280, 720, PIXEL_FORMAT_YVU_SEMIPLANAR_422, DATA_BITWIDTH_8, COMPRESS_MODE_NONE, DEFAULT_ALIGN } }; const int kFmtsCnt = sizeof(fmts) / sizeof(fmts[0]); //------------------------------------------------------------------- VB_CONFIG_S vb_conf; bzero(&vb_conf, sizeof(vb_conf)); vb_conf.u32MaxPoolCnt = kFmtsCnt; for (int k = 0; k < kFmtsCnt; ++k) { HI_U32 frmsz = COMMON_GetPicBufferSize(fmts[k].Width, fmts[k].Height, fmts[k].PixFmt, fmts[k].BitWidth, fmts[k].CompressMode, fmts[k].Align); vb_mPool[k].u32BlkCnt = 8; vb_mPool[k].u64BlkSize = frmsz; } MPI_CHECK(HI_MPI_VB_SetConfig(&vb_conf), return false); MPI_CHECK(HI_MPI_VB_Init(), return false); return true;}// 初始化Systemstatic bool _InitSys(){ MPI_CHECK(HI_MPI_SYS_Init(), return false); return true;}我们把这个过程总结为⼀个函数bool Initialize(){ bool ok = _InitVb() && _InitSys(); return ok;}void Shutdown(){ HI_MPI_SYS_Exit(); HI_MPI_VB_Exit();}第三步,启动MIPI// 模式7, 4-lanevoid RunOnce(){ lane_divide_mode_t ld_mode = LANE_DIVIDE_MODE_7; Mipi::Inst()->IoControl(HI_MIPI_SET_HS_MODE, &ld_mode);}// idx 可取0,1,2,3bool StartMipi(int idx, int cx, int cy){ bool ok = true; combo_dev_t devno = desc::MakeComboDev(idx); ok = ok && Mipi::Inst()->IoControl(HI_MIPI_ENABLE_MIPI_CLOCK, &devno); ok = ok && Mipi::Inst()->IoControl(HI_MIPI_RESET_MIPI, &devno); auto dev_attr = desc::MakeMipiAttrs(idx, cx, cy); ok = ok && Mipi::Inst()->IoControl(HI_MIPI_SET_DEV_ATTR, &dev_attr); ok = ok && Mipi::Inst()->IoControl(HI_MIPI_UNRESET_MIPI, &devno); // return ok;}第四步,创建VI创建VI包括启⽤VI_DEV,绑定PIPE,设置CHANNEL等⼯作。// idx 可取 0,1,2,3bool StartVi(int idx, int cx, int cy){ VI_DEV devno = desc::MakeViDev(idx); VI_PIPE pipeno = desc::MakeViPipe(idx); // step 1. enable vi dev auto attrs = desc::MakeMipiYuvAttrs_YUV422_8bits_UYVY(cx, cy); MPI_CHECK(HI_MPI_VI_SetDevAttr(devno, &r), return false); MPI_CHECK(HI_MPI_VI_EnableDev(devno), return false); // step 2. start pipe VI_DEV_BIND_PIPE_S binded_pipe; bzero(&binded_pipe, sizeof(binded_pipe)); binded_[0] = pipeno; binded_pipe.u32Num = 1; MPI_CHECK(HI_MPI_VI_SetDevBindPipe(devno, &binded_pipe), return false); MPI_CHECK(HI_MPI_VI_CreatePipe(pipeno, &tr), return false); MPI_CHECK(HI_MPI_VI_StartPipe(pipeno), return false); // step 3. start chn // Pipe has 2 channel, we use channel 0. VI_CHN chnno = 0; MPI_CHECK(HI_MPI_VI_SetChnAttr(pipeno, chnno, &r), return false); MPI_CHECK(HI_MPI_VI_EnableChn(pipeno, chnno), return false); // return true;}第五步,创建编码通道static HI_S32 CreateH265Channel(VENC_CHN chnno, int cx, int cy, int fps){ VENC_CHN_ATTR_S chn_attr; bzero(&chn_attr, sizeof(chn_attr)); // 这个函数从SDK的sample中抄的. SAMPLE_COMM_VENC_GetGopAttr(VENC_GOPMODE_NORMALP, &chn_ttr); // rc attr , variable bitrate HI_U32 stat_time = 1, gop = 30; if (chn_ode == VENC_GOPMODE_ADVSMARTP) { stat_time = chn_tP.u32BgInterval / gop; } else if (chn_ode == VENC_GOPMODE_SMARTP) { stat_time = chn_tP.u32BgInterval / gop; } // vbr#if 0 chn_de = VENC_RC_MODE_H265VBR; chn_265Vbr.u32Gop = gop; chn_265Vbr.u32StatTime = stat_time; chn_265Vbr.u32SrcFrameRate = fps; chn_32DstFrameRate = std::min(25, fps); chn_265Vbr.u32MaxBitRate = 900; // 900k#endif#if 1 // 选⽤固定码率 chn_de = VENC_RC_MODE_H265CBR; chn_265Cbr.u32Gop = gop; chn_265Cbr.u32StatTime = stat_time; chn_265Cbr.u32SrcFrameRate = fps; chn_32DstFrameRate = std::min(25, fps); chn_265Cbr.u32BitRate = 500;//1024*2 + 2048*fps/gop;#endif // enc attr chn_ = PT_H265; chn_Attr.u32MaxPicWidth = cx; chn_Attr.u32MaxPicHeight = cy; chn_Attr.u32PicWidth = cx; chn_Attr.u32PicHeight = cy; chn_Attr.u32BufSize = cx * cy * 2; chn_Attr.u32Profile = 0; // H265 mainprofile chn_me = HI_TRUE; chn_fShareBuf = HI_FALSE; // create channel MPI_CHECK(HI_MPI_VENC_CreateChn(chnno, &chn_attr), return HI_FAILURE); // VENC_RECV_PIC_PARAM_S recv_param; recv_param.s32RecvPicNum = -1; MPI_CHECK(HI_MPI_VENC_StartRecvFrame(chnno, &recv_param), return HI_FAILURE); // return HI_SUCCESS;}第六步,建⽴视频到编码器的管道void BuildPipeline(int idx){ MPP_CHN_S src; d = HI_ID_VI; src.s32DevId = desc::MakeViDev(idx); src.s32ChnId = 0; MPP_CHN_S dst; d = HI_ID_VENC; dst.s32DevId = 0; dst.s32ChnId = idx; MPI_CHECK(HI_MPI_SYS_Bind(&src, &dst), return -1);}所以完整的流程是因为是记录过程,因此失败后的处理代码不赘述了。int main(){ // 初始化必要的模块 Initialize(); // 只需要执⾏⼀次的内容, 这⾥是选择MIPI的⼯作模式 RunOnce(); // 创建4个采集通道 for (int i = 0; i < 4; ++i) { StartMipi(i, 1920, 1080); StartVi(i, 1920, 1080); CreateH265Channel(i, 1920, 1080, 60); BuildPipeline(i); } // 取编码数据 while (1) { for (int i = 0; i < 4; ++i) { auto out = QueryFrame(i, 100); if (!()) { Dump(i, out); } } } // release your resources here}// 从编码器⾥取数据std::vector QueryFrame(int idx, int timeout){ std::vector out; if (idx < 0) return out; // return empty for invalid idx. // VENC_CHN_STATUS_S chn_status; MPI_CHECK(HI_MPI_VENC_QueryStatus(idx, &chn_status), return out); if (chn_status.u32CurPacks > 0) { VENC_STREAM_S stream; bzero(&stream, sizeof(stream)); k = new VENC_PACK_S[chn_status.u32CurPacks]; stream.u32PackCount = chn_status.u32CurPacks; MPI_CHECK(HI_MPI_VENC_GetStream(idx, &stream, timeout), delete[] k; return out); HI_U32 len = 0; for (HI_U32 k = 0; k < stream.u32PackCount; ++k) { len += k[k].u32Len - k[k].u32Offset; } out = std::vector(len); len = 0; for (HI_U32 k = 0; k < stream.u32PackCount; ++k) { memcpy(() + len, k[k].pu8Addr + k[k].u32Offset, k[k].u32Len - k[k].u32Offset); len += k[k].u32Len - k[k].u32Offset; } // MPI_CHECK(HI_MPI_VENC_ReleaseStream(idx, &stream), ); delete[] k; } return out;}吐槽时间视频采集,linux有v4l2系统,为什么不⽤?⾳频采集,linux有alsa,为什么不⽤?以及我什么也没说虚拟通道的处理如果采⽤了虚拟通道,即在4-lane上传输多路视频,那么需要绑定对应数⽬的PIPE,并使⽤HI_MPI_VI_SetPipeVCNumber为绑定的PIPE指定虚拟通道号,在使能PIPE的物理通道时,要设置物理通道的队列深度⼤于0,这样之后可以使⽤HI_MPI_VI_GetChnFrame从物理通道⾥获取YUV420 SP格式的数据,并调⽤HI_MPI_VENC_SendFrame把数据送⼊编码器编码。Video output海思SDK⽤VO来抽象视频输出。HI3559AV100有三种物理输出,HDMI、BT1120和MIPI-Tx。VO的⽬的⽤途是产⽣视频帧、产⽣正确的时序,驱动物理接⼝输出。graph LRsubgraph "HI3559AV100 VO" id1["DHD0"]---id2["VHD0, Super HD"] id1["DHD0"]---id3["VHD2, HD"] id1["DHD0"]---id6["G0,G3"] id4["DHD1"]---id5["VHD1, HD"] id4["DHD1"]---id7["G1,G3"]endDHD0⽀持超⾼清输出,DHD1⽀持⾼清输出。VHD、G可以叠加,DHD0和DHD1都可以与G3叠加,故⽽G3可以⽤来实现⿏标层。海思SDK中内置了⼀些常⽤的分辨率,使⽤它们时不需要⾃⼰设置时序,否则就需要⾃定义时序。⾃定义时序时,请餐卡vesa标准,linux下有⼀些⼯具⽅便进⾏计算,例如gtf、cvt,nvidia的显卡驱动中也带了计算⼯具,vesa官⽅提供了⼀个excel表格,也可以进⾏计算,总之还是不难的。程序流程// 初始化模块bool Initialize(){ Shutdown(); // bool ok = _InitVb() && _InitSys() && Hdmi::Initialize(); // should be ok return ok;}// 初始化输出物理设备bool Hdmi::Open(int cx, int cy, int fps){ // init, register callback auto devid = _FromIdx(mIdx); MPI_CHECK(HI_MPI_HDMI_Open(devid), return false); // register callback MPI_CHECK(HI_MPI_HDMI_RegCallbackFunc(devid, mRc->HdmiCb()), ); // HI_HDMI_ATTR_S attr; MPI_CHECK(HI_MPI_HDMI_GetAttr(devid, &attr), return false); eHdmi = HI_TRUE; eVideo = HI_TRUE; oFmt = _FindVideoFmt(cx, cy, fps); if (oFmt == HI_HDMI_VIDEO_FMT_VESA_CUSTOMER_DEFINE) { qLog(ERROR) << "Dose not support user defined timing yet" << std::endl; return false; } nMode = HI_HDMI_VIDEO_MODE_YCBCR444; utMode = HI_HDMI_VIDEO_MODE_YCBCR444; ColorMode = HI_HDMI_DEEP_COLOR_24BIT; Mode = HI_FALSE; scQuantization = HDMI_QUANTIZATION_LIMITED_RANGE; eAudio = HI_FALSE; dIntf = HI_HDMI_SND_INTERFACE_I2S; tiChannel = HI_FALSE; epth = HI_HDMI_BIT_DEPTH_16; eAviInfoFrame = HI_TRUE; eAudInfoFrame = HI_TRUE; eSpdInfoFrame = HI_FALSE; eMpegInfoFrame = HI_FALSE; Flag = HI_FALSE; nable = HI_FALSE; attr.b3DEnable = HI_FALSE; ultMode = HI_HDMI_FORCE_HDMI; MPI_CHECK(HI_MPI_HDMI_SetAttr(devid, &attr), return false); MPI_CHECK(HI_MPI_HDMI_Start(devid), return false); // return true;}// 初始化输出bool InitVO(){ MPI_CHECK(HI_MPI_VO_Disable(vodev), ); MPI_CHECK(HI_MPI_VO_SetPubAttr(vodev, &pub_attr), return false); MPI_CHECK(HI_MPI_VO_Enable(vodev), return false); // 启⽤层 VO_VIDEO_LAYER_ATTR_S vlayer_attr; vlayer_erMode = HI_FALSE; vlayer_eFrame = HI_FALSE; vlayer_ynamicRange = DYNAMIC_RANGE_SDR8; vlayer_ormat = PIXEL_FORMAT_YVU_SEMIPLANAR_422; vlayer_eSize = SIZE_S { (HI_U32)cx, (HI_U32)cy }; vlayer_Rect = RECT_S { 0, 0, (HI_U32)cx, (HI_U32)cy }; vlayer_attr.u32DispFrmRt = fps; MPI_CHECK(HI_MPI_VO_SetDisplayBufLen(mVoLayer, 3), return false); MPI_CHECK(HI_MPI_VO_SetVideoLayerAttr(mVoLayer, &vlayer_attr), return false); MPI_CHECK(HI_MPI_VO_EnableVideoLayer(mVoLayer), return false);}海思的SDK是通⽤的,所以每⼀个⽤户都需要根据硬件的情况设计应⽤。哎,带⼀个图形界⾯多好,这部分⼯作交给⼩⼚家去做很费劲的。如果仅仅基于framebuffer做⼀个GUI,那效率堪忧。瑞芯微3399上有⼀个linux发⾏版,⽤起来就是卡卡的,看着性能与nvidia的jetson不差,但是jetson的GUI那是可以办公⽤的。AudioTo be continuedGPU按照⼿册测试,有的不能运⾏,能运⾏的结果不对。哎,好好的,你整个X windows实现不⾏吗?
发布者:admin,转转请注明出处:http://www.yc00.com/web/1690228481a317995.html
评论列表(0条)