
:seedling: Integrate the face SDK

everywindchase committed 10 months ago
commit 9a578be159

+ 1 - 1
app/build.gradle

@@ -10,7 +10,7 @@ android {
         applicationId "com.ethan.psbc"
         minSdk libs.versions.minSdk.get().toInteger()
         targetSdk libs.versions.compileSdk.get().toInteger()
-        versionCode 33
+        versionCode 47
         versionName "1.1.6"
         setProperty('archivesBaseName', "PSBC-$versionName-$versionCode-" + (new Date()).format('yyyyMMdd'))
         ndk { abiFilters    "armeabi","x86","x86_64","armeabi-v7a","arm64-v8a" }

+ 90 - 93
app/src/main/assets/CWModels/_configs_frontend_x86_arm.xml

@@ -1,96 +1,93 @@
 <?xml version="1.0"?>
 <opencv_storage>
-<!-- VIP Frontend Configurations by Cloudwalk. -->
-<!-- To use a parameter's default value, comment out the corresponding line -->
-<VIP_Frontend>
-  <!-- Initialization parameters required to create the handle -->
-  <Face_INIT_Params>
-    <!-- Number of frontend workers, range 1~6. Default 3. (Tune to the device performance and use case; e.g. for 10 streams on an E5, 2 or 3 is recommended) -->
-    <Worker_Num>3</Worker_Num>
-    <!-- Number of workers for detector scale computation, range 1~6, default 3; multiple workers may be started as needed -->
-    <Worker_DETS_Num>3</Worker_DETS_Num>
-    <!-- Number of workers for detector sliding-window computation, range 1~6, default 1; multiple workers may be started as needed (less effective than scale parallelism; allocate only when many cores are available) -->
-    <Worker_DETW_Num>2</Worker_DETW_Num>
-    <!-- Model loading mode (0 = from file); the DL build currently supports file mode only -->
-    <Model_Mode>0</Model_Mode>
-    <!-- Face detector model (effective in file mode, DL build) -->
-    <str_face_detect_model_dl>faceDetector_3_15deepnet1_4_3.mdl</str_face_detect_model_dl>
-    <!-- Face detector model (effective in file mode, non-DL build) -->
-    <str_face_detect_model>faceDetector_2_4.mdl</str_face_detect_model>
-    <!-- DL keypoint detector model (effective in file mode) -->
-    <str_face_keypt_model_dl>landmarks_2_0_2.bin</str_face_keypt_model_dl>
-    <!-- Quality-score detector model (effective in file mode) -->
-    <str_face_quality_model>face_quality_4_2.bin</str_face_quality_model>
-    <!-- Face_INIT_Params -->
-    </Face_INIT_Params>
-  <!-- Interface feature parameters -->
-  <Face_DetTrack_Params>
-    <!-- Maximum number of faces per frame -->
-    <Faces_Num_Max_Frame>20</Faces_Num_Max_Frame>
-    <!-- Detection sensitivity, range 1-10; larger values lower the detection rate but reduce false positives. Default 2 -->
-    <Neighbors_Num_Min>2</Neighbors_Num_Min>
-    <!-- Detection granularity, range 1.1 ~ 1.80; larger is faster but coarser. Default 1.20 -->
-    <scale_up_ratio>1.2000000476837158e+000</scale_up_ratio>
-    <!-- Detection granularity, range 0.9 ~ 0.3; smaller is faster but coarser. Default 0.709 -->
-    <scale_up_ratio_dl>7.0899999141693115e-001</scale_up_ratio_dl>
-    <!-- Detection granularity, range >= 1; larger is slower but finer. Default 2 (leave this parameter unchanged) -->
-    <step_win_in_pixel_dl>2</step_win_in_pixel_dl>
-    <!-- Detection granularity, range 0.1 ~ 0.25; larger is faster but coarser. Default 0.13 -->
-    <Win_Step_Ratio>1.2999999523162842e-001</Win_Step_Ratio>
-    <!-- Expansion ratio of the sub-image used for local detection, range [1.4, 3.0], default 1.80 -->
-    <Local_ImgSize_Expand_Ratio>1.7999999523162842e+000</Local_ImgSize_Expand_Ratio>
-    <!-- Face expansion ratio for local detection, range [1.2 ~ 1.5], default 1.30 -->
-    <Local_FaceSize_Range_Ratio>1.2999999523162842e+000</Local_FaceSize_Range_Ratio>
-    <!-- With tracking enabled, frame-skipping setting for post-processing (keypoints, alignment or quality score) when its switch is on, range [0, 2]: 0 = post-process every detected face, 1 = skip only globally detected faces, 2 = post-process every other frame. Default 2 -->
-    <Post_Detect_Frequency>0</Post_Detect_Frequency>
-    <!-- Whether undetected faces are handled by "chaining + tracking": 0 = off, non-zero = on. Default 1 (on) -->
-    <Lost_Track>1</Lost_Track>
-    <!-- Number of pre-tracking frames, default 2 -->
-    <Frame_Num_For_New>2</Frame_Num_For_New>
-    <!-- Detection performance level, effectively the downscale ratio of the detection image, range (1~6). Smaller values mean more downscaling and vice versa. Default 1; if performance is poor, 6 (no scaling) is recommended; for very large images use 1~5. -->
-    <Perfmon_Level>1</Perfmon_Level>
-    <!-- Image preprocessing mode: 0 = none, 1 = Gaussian blur, other = none. Default 0 (no preprocessing). -->
-    <Image_Preprocess_Mode>0</Image_Preprocess_Mode>
-    <!-- Color space of the aligned face image: 1 = grayscale; 2 = two-channel; 3 = color -->
-    <ImgAligned_Color_Mode>1</ImgAligned_Color_Mode>
-    <!-- Number of face boxes required for static false-positive filtering; a positive value enables filtering, 0 or a negative value disables it. Default 0 (disabled). -->
-    <Fake_Face_Filter_Aggregate>0.</Fake_Face_Filter_Aggregate>
-    <!-- Whether to filter out faces smaller than the configured minimum face size: 0 = no, non-zero = yes -->
-    <face_filter_over_small>1</face_filter_over_small>
-    <!-- Under the equal-time-interval best-face selection strategy, if a face in the first stage exceeds this threshold, the best face of that stage is returned immediately -->
-    <best_face_first_threshold>8.0000001192092896e-001</best_face_first_threshold>
-    <!-- Whether to enable detector attributes: 1 = on, anything else = off. Default on. -->
-    <open_property_by_detector>0</open_property_by_detector>
-    <!-- Detection-angle filtering threshold, 0.125 recommended. Larger values are faster and filter out more (lower-quality) faces; choose a trade-off. -->
-    <filt_by_detected_angle_threshold>0</filt_by_detected_angle_threshold>
-    <!-- Whether to enable fast quality-score mode (the keypoint score is used as the total quality score and the other quality sub-scores become meaningless): 1 = on, anything else = off -->
-    <fast_quality_mode>0</fast_quality_mode>
-    <!-- Whether the algorithm reuses the image data passed in by the caller. -->
-    <frame_data_reused>1</frame_data_reused>
-    <!-- Total-score switch -->
-    <Quality_TotalScore_Switch>1</Quality_TotalScore_Switch>
-    <!-- Blur/illumination score switch -->
-    <Quality_IMG_Switch>0</Quality_IMG_Switch>
-    <!-- Skin-tone genuineness score switch -->
-    <Quality_Skinness_Switch>1</Quality_Skinness_Switch>
-    <!-- Pose (angle) score switch -->
-    <Quality_Pose_Switch>0</Quality_Pose_Switch>
-    <!-- Eye-openness score switch -->
-    <Quality_EyeOpenness_Switch>0</Quality_EyeOpenness_Switch>
-    <!-- Mouth-openness score switch -->
-    <Quality_MouthOpenness_Switch>0</Quality_MouthOpenness_Switch>
-    <!-- Black-framed-glasses score switch -->
-    <Quality_BlackFrameglass_Switch>0</Quality_BlackFrameglass_Switch>
-    <!-- Sunglasses score switch -->
-    <Quality_Sunglass_Switch>0</Quality_Sunglass_Switch>
-    <!-- Eye-occlusion score switch -->
-    <Quality_EyeOcclusion_Switch>0</Quality_EyeOcclusion_Switch>
-    <!-- MOG clearness score switch -->
-    <Quality_MogClearness_Switch>0</Quality_MogClearness_Switch>
-    <!-- Combined switch for all quality scores -->
-    <All_Score_Switch>0</All_Score_Switch>
-    <!-- Face_DetTrack_Params -->
-    </Face_DetTrack_Params>
-  <!-- VIP Frontend Configurations by Cloudwalk. -->
-  </VIP_Frontend>
+    <VIP_Frontend>
+        <Face_INIT_Params>
+
+            <Worker_Num>3</Worker_Num>
+
+            <Worker_DETS_Num>3</Worker_DETS_Num>
+
+            <Worker_DETW_Num>2</Worker_DETW_Num>
+
+            <Model_Mode>0</Model_Mode>
+
+            <str_face_detect_model_dl>faceDetector_3_15deepnet1_4_3.mdl</str_face_detect_model_dl>
+
+            <str_face_detect_model>faceDetector_2_4.mdl</str_face_detect_model>
+
+            <str_face_keypt_model_dl>landmarks_2_0_2.bin</str_face_keypt_model_dl>
+
+            <str_face_quality_model>face_quality_4_2.bin</str_face_quality_model>
+
+        </Face_INIT_Params>
+
+        <Face_DetTrack_Params>
+
+            <Faces_Num_Max_Frame>20</Faces_Num_Max_Frame>
+
+            <Neighbors_Num_Min>2</Neighbors_Num_Min>
+
+            <scale_up_ratio>1.2000000476837158e+000</scale_up_ratio>
+
+            <scale_up_ratio_dl>7.0899999141693115e-001</scale_up_ratio_dl>
+
+            <step_win_in_pixel_dl>2</step_win_in_pixel_dl>
+
+            <Win_Step_Ratio>1.2999999523162842e-001</Win_Step_Ratio>
+
+            <Local_ImgSize_Expand_Ratio>1.7999999523162842e+000</Local_ImgSize_Expand_Ratio>
+
+            <Local_FaceSize_Range_Ratio>1.2999999523162842e+000</Local_FaceSize_Range_Ratio>
+
+            <Post_Detect_Frequency>0</Post_Detect_Frequency>
+
+            <Lost_Track>1</Lost_Track>
+
+            <Frame_Num_For_New>2</Frame_Num_For_New>
+
+            <Perfmon_Level>1</Perfmon_Level>
+
+            <Image_Preprocess_Mode>0</Image_Preprocess_Mode>
+
+            <ImgAligned_Color_Mode>3</ImgAligned_Color_Mode>
+
+            <Fake_Face_Filter_Aggregate>0</Fake_Face_Filter_Aggregate>
+
+            <face_filter_over_small>1</face_filter_over_small>
+
+            <best_face_first_threshold>8.0000001192092896e-001</best_face_first_threshold>
+
+            <open_property_by_detector>0</open_property_by_detector>
+
+            <filt_by_detected_angle_threshold>0</filt_by_detected_angle_threshold>
+
+            <fast_quality_mode>0</fast_quality_mode>
+
+            <frame_data_reused>1</frame_data_reused>
+
+            <Quality_TotalScore_Switch>1</Quality_TotalScore_Switch>
+
+            <Quality_IMG_Switch>0</Quality_IMG_Switch>
+
+            <Quality_Skinness_Switch>1</Quality_Skinness_Switch>
+
+            <Quality_Pose_Switch>0</Quality_Pose_Switch>
+
+            <Quality_EyeOpenness_Switch>0</Quality_EyeOpenness_Switch>
+
+            <Quality_MouthOpenness_Switch>0</Quality_MouthOpenness_Switch>
+
+            <Quality_BlackFrameglass_Switch>0</Quality_BlackFrameglass_Switch>
+
+            <Quality_Sunglass_Switch>0</Quality_Sunglass_Switch>
+
+            <Quality_EyeOcclusion_Switch>0</Quality_EyeOcclusion_Switch>
+
+            <Quality_MogClearness_Switch>0</Quality_MogClearness_Switch>
+
+            <All_Score_Switch>0</All_Score_Switch>
+
+        </Face_DetTrack_Params>
+
+    </VIP_Frontend>
 </opencv_storage>

+ 33 - 20
app/src/main/java/com/ethan/psbc/managers/impls/ImplFaceManager.kt

@@ -8,8 +8,9 @@ import cn.face.sdk.FaceParam
 import cn.face.sdk.FaceRecog
 import com.ethan.psbc.constants.ConstantApp
 import com.ethan.psbc.managers.IFaceManager
+import com.google.gson.Gson
+import com.google.gson.GsonBuilder
 import org.koin.core.component.KoinComponent
-
 import java.io.File
 
 class ImplFaceManager: IFaceManager, KoinComponent {
@@ -37,23 +38,6 @@ class ImplFaceManager: IFaceManager, KoinComponent {
 
 
 
-            try{
-                FaceRecog.getInstance()
-                val faceRegConfigFile: File?
-                faceRegConfigFile = File("$dirPath/CWModels", "CWR_Config_1_1.xml")
-                val res= FaceRecog.cwCreateRecogHandle(faceRegConfigFile.absolutePath, ConstantApp.faceLicense, 0)
-                if (res >= FaceInterface.cw_errcode_t.CW_UNKNOWN_ERR) {
-                    Log.d("demo", "Failed to create the face recognition handle")
-                    Log.d("demo", "Face recognition handle load failed: ${res}" )
-                } else {
-                    Log.d("demo", "Face recognition handle created successfully" )
-                    Log.d("demo", "Face recognition handle limit count: ${res}")
-                    faceRegFlag=true
-                }
-            }catch (e:Exception){
-                throw e
-            }
-
             try {
                 FaceDetTrack.getInstance()
                 val faceDetConfigFile: File?
@@ -68,14 +52,43 @@ class ImplFaceManager: IFaceManager, KoinComponent {
                     faceDetFlag=true
 
                     Log.d("demo", "Setting face tracking parameters" )
-                    var faceParam=FaceParam()
-                    FaceDetTrack.cwSetFaceParam(res,faceParam,dirPath)
+                    var faceParam= FaceParam()
+                    FaceDetTrack.cwGetFaceParam(res,faceParam)
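+                    // minSize/maxSize presumably bound the detectable face size in pixels (assumption based on the field names).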
+                    faceParam.minSize=100
+                    faceParam.maxSize=400
+                    FaceDetTrack.cwSetFaceParam(res,faceParam,faceDetConfigFile.absolutePath)
+
+                    val gson: Gson = GsonBuilder().create()
+                    var gsonDetail=gson.toJson(faceParam)
+                    Log.d("demo", "Face tracking parameter details: ${gsonDetail}" )
+                }
+            }catch (e:Exception){
+                throw e
+            }
+
+
+
+
+            try{
+                FaceRecog.getInstance()
+                val faceRegConfigFile: File?
+                faceRegConfigFile = File("$dirPath/CWModels", "CWR_Config_1_1.xml")
+                val res= FaceRecog.cwCreateRecogHandle(faceRegConfigFile.absolutePath, ConstantApp.faceLicense, 0)
+                if (res >= FaceInterface.cw_errcode_t.CW_UNKNOWN_ERR) {
+                    Log.d("demo", "Failed to create the face recognition handle")
+                    Log.d("demo", "Face recognition handle load failed: ${res}" )
+                } else {
+                    Log.d("demo", "Face recognition handle created successfully" )
+                    Log.d("demo", "Face recognition handle limit count: ${res}")
+                    faceRegFlag=true
                 }
             }catch (e:Exception){
                 throw e
             }
 
 
+
+
             if(faceDetFlag&&faceRegFlag){
                 faceHandleRet=0
             }

+ 93 - 12
app/src/main/java/com/ethan/psbc/ui/dialogs/DialogValidateFace.kt

@@ -3,11 +3,16 @@ package com.ethan.psbc.ui.dialogs
 import android.Manifest
 import android.content.Context
 import android.content.pm.PackageManager
+import android.graphics.ImageFormat
 import android.util.Log
+import android.util.Size
 import android.view.View
 import android.widget.Toast
 import androidx.appcompat.app.AppCompatActivity
 import androidx.camera.core.*
+import androidx.camera.core.ImageAnalysis.OUTPUT_IMAGE_FORMAT_YUV_420_888
+import androidx.camera.core.resolutionselector.ResolutionSelector
+import androidx.camera.core.resolutionselector.ResolutionStrategy
 import androidx.camera.lifecycle.ProcessCameraProvider
 import androidx.camera.view.PreviewView
 import androidx.core.app.ActivityCompat
@@ -26,6 +31,8 @@ import org.koin.core.component.inject
 import java.nio.ByteBuffer
 import java.util.concurrent.ExecutorService
 import java.util.concurrent.Executors
+import kotlin.math.min
+
 
 /**
  * <p>Face verification screen</p>
@@ -64,6 +71,11 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
     }
 
 
+
+
+
+
+
     private lateinit var cameraExecutor: ExecutorService
     private var cameraProvider: ProcessCameraProvider? = null
     private var preview: Preview? = null
@@ -93,8 +105,14 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
             cameraProviderFuture.addListener({
                 cameraProvider = cameraProviderFuture.get()//get the camera info
 
+
+
+                val resolutionBuilder = ResolutionSelector
+                    .Builder()
+                    .setResolutionStrategy(ResolutionStrategy(Size(640, 480), ResolutionStrategy.FALLBACK_RULE_NONE))
+                    .build()
                 //preview configuration
-                preview = Preview.Builder()
+                preview = Preview.Builder().setResolutionSelector(resolutionBuilder)
                     .build()
 
 
@@ -102,27 +120,86 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
 
 
 
-
                 imageCamera = ImageCapture.Builder()
                     .setCaptureMode(ImageCapture.CAPTURE_MODE_MINIMIZE_LATENCY)
                     .build()
 
 
-                imageAnalysis = ImageAnalysis.Builder()
+                imageAnalysis = ImageAnalysis.Builder().setOutputImageFormat(OUTPUT_IMAGE_FORMAT_YUV_420_888)
                     .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
+                    .setTargetResolution(Size(640, 480))
                     .build()
 
 
                 imageAnalysis.setAnalyzer(cameraExecutor) { imageProxy ->
-                    val planes: Array<ImageProxy.PlaneProxy> = imageProxy.planes
-                    val buffer: ByteBuffer = planes[0].buffer;
-                    val data: ByteArray = ByteArray(buffer.capacity())
-                    buffer.rewind()
-                    buffer.get(data)
 
+                    val imageFormat = imageProxy.format
 
-                    //Step 1: call the face tracking API to analyse the image stream features
+                    if (ImageFormat.YUV_420_888 == imageFormat) {
+                        Log.d("demo", "Image format is YUV_420_888")
+                    }
+
+
+
+                    // Convert the YUV_420_888 planes to NV21: full-resolution Y plane first,
+                    // then interleaved V/U chroma bytes.
+                    val yPlane: ImageProxy.PlaneProxy = imageProxy.planes[0]
+                    val uPlane: ImageProxy.PlaneProxy = imageProxy.planes[1]
+                    val vPlane: ImageProxy.PlaneProxy = imageProxy.planes[2]
+
+                    val yBuffer: ByteBuffer = yPlane.buffer
+                    val uBuffer: ByteBuffer = uPlane.buffer
+                    val vBuffer: ByteBuffer = vPlane.buffer
+
+                    yBuffer.rewind()
+                    uBuffer.rewind()
+                    vBuffer.rewind()
+
+                    // NV21 needs width*height luma bytes plus width*height/2 chroma bytes.
+                    val ySize: Int = yBuffer.remaining()
+                    var position: Int = 0
+                    val nv21 = ByteArray(ySize + imageProxy.width * imageProxy.height / 2)
 
+                    // Copy the Y plane row by row, skipping any row-stride padding.
+                    for (i in 0 until imageProxy.height) {
+                        yBuffer.get(nv21, position, imageProxy.width)
+                        position += imageProxy.width
+                        yBuffer.position(Math.min(ySize, yBuffer.position() - imageProxy.width + yPlane.rowStride))
+                    }
+
+                    val chromaHeight: Int = imageProxy.height / 2
+                    val chromaWidth: Int = imageProxy.width / 2
+                    val vRowStride: Int = vPlane.rowStride
+                    val uRowStride: Int = uPlane.rowStride
+                    val vPixelStride: Int = vPlane.pixelStride
+                    val uPixelStride: Int = uPlane.pixelStride
+                    val vLineBuffer = ByteArray(vRowStride)
+                    val uLineBuffer = ByteArray(uRowStride)
+
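+                    // NV21 stores chroma as interleaved V then U; walk both planes respecting their pixel stride.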
+                    for (row in 0 until chromaHeight) {
+                        vBuffer.get(vLineBuffer, 0, min(vRowStride, vBuffer.remaining()))
+                        uBuffer.get(uLineBuffer, 0, min(uRowStride, uBuffer.remaining()))
+                        var vLineBufferPosition = 0
+                        var uLineBufferPosition = 0
+
+                        for (col in 0 until chromaWidth) {
+                            nv21[position++] = vLineBuffer[vLineBufferPosition]
+                            nv21[position++] = uLineBuffer[uLineBufferPosition]
+                            vLineBufferPosition += vPixelStride
+                            uLineBufferPosition += uPixelStride
+                        }
+                    }
+
+
+
+
+
+
+
+                    //Step 1: call the face tracking API to analyse the image stream features
 
 
                     if(handle==-1){
@@ -132,9 +209,8 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
                         try{
                             val iFeaLen =FaceRecog.cwGetFeatureLength(handle)
                             Log.d("demo", "Obtained face feature length: ${iFeaLen}")
-
-                            var pFaceBuffer=Array(3){_ ->FaceInfo()}
-                            val faceDetRet:Int= FaceDetTrack.cwFaceDetection(handle,data,0, 0, FaceInterface.cw_img_form_t.CW_IMAGE_BINARY, 0, 0, FaceInterface.cw_op_t.CW_OP_ALIGN , pFaceBuffer)
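+                            // Run detection on the NV21 frame; 640x480 matches the analysis resolution requested above.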
+                            val pFaceBuffer = Array(1) { FaceInfo() }
+                            val faceDetRet: Int = FaceDetTrack.cwFaceDetection(handle, nv21, 640, 480, FaceInterface.cw_img_form_t.CW_IMAGE_NV21, 0, 0, FaceInterface.cw_op_t.CW_OP_DET, pFaceBuffer)
                             if (faceDetRet >= FaceInterface.cw_errcode_t.CW_UNKNOWN_ERR) {
                                 Log.d("demo", "Face detection error: ${faceDetRet}" )
                             }else  if(faceDetRet<1){
@@ -177,6 +253,9 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
 
                 }
 
+                val cameraCtrl=camera.cameraControl
+                val cameraInfo=camera.cameraInfo
+
 
 
 
@@ -197,4 +276,6 @@ class DialogValidateFace( mContext: Context) : FullScreenPopupView(mContext), Ko
 
 
 
+
+
 }

+ 2 - 2
app/src/main/res/layout/dialog_validate_face.xml

@@ -21,8 +21,8 @@
 
         <androidx.camera.view.PreviewView
             android:id="@+id/mPreviewView"
-            android:layout_width="50dp"
-            android:layout_height="50dp"
+            android:layout_width="0dp"
+            android:layout_height="0dp"
             app:layout_constraintBottom_toBottomOf="parent"
             app:layout_constraintLeft_toLeftOf="parent"
             app:layout_constraintRight_toRightOf="parent"