
Real-time face detection with the camera in Swift 3

  •  1
  • Pranavan SP  ·  7 years ago

    How can I do real-time face detection just like the built-in Camera app, with a white round shape drawn around and over the face? I am using AVCaptureSession.

    Code

    import UIKit
    import AVFoundation

    class CameraFaceRecongnitionVC: UIViewController {
    
        @IBOutlet weak var imgOverlay: UIImageView!
        @IBOutlet weak var btnCapture: UIButton!
    
        let captureSession = AVCaptureSession()
        let stillImageOutput = AVCaptureStillImageOutput()
        var previewLayer : AVCaptureVideoPreviewLayer?
    
        // If we find a device we'll store it here for later use
        var captureDevice : AVCaptureDevice?
    
        override func viewDidLoad() {
            super.viewDidLoad()
            // Custom styling helpers defined elsewhere in the project
            btnCapture.CameraButton()
            roundButton.RoundButtonForFaceRecong()
    
            // Do any additional setup after loading the view, typically from a nib.
            captureSession.sessionPreset = AVCaptureSessionPresetHigh
    
            if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
                // Loop through all the capture devices on this phone
                for device in devices {
                    // Make sure this particular device supports video
                    if (device.hasMediaType(AVMediaTypeVideo)) {
                // Finally check the position and confirm we've got the front camera
                        if(device.position == AVCaptureDevicePosition.front) {
                            captureDevice = device
                            if captureDevice != nil {
                                print("Capture device found")
                                beginSession()
                            }
                        }
                    }
                }
            }
        }
    
        @IBAction func actionCameraCapture(_ sender: AnyObject) {
    
            print("Camera button pressed")
            saveToCamera()
        }
    
        func beginSession() {
    
            do {
                try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
                stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
    
                if captureSession.canAddOutput(stillImageOutput) {
                    captureSession.addOutput(stillImageOutput)
                }
    
            }
            catch {
                print("error: \(error.localizedDescription)")
            }
    
            guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
                print("no preview layer")
                return
            }
    
            self.view.layer.addSublayer(previewLayer)
            previewLayer.frame = self.view.layer.frame
            captureSession.startRunning()
    
           // self.view.addSubview(navigationBar)
            self.view.addSubview(imgOverlay)
            self.view.addSubview(btnCapture)
        }
    
        func saveToCamera() {
    
            if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
    
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) in
                if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer) {
    
                        if let cameraImage = UIImage(data: imageData) {
    
                            UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
                        }
                    }
                })
            }
        }
    
        override func didReceiveMemoryWarning() {
            super.didReceiveMemoryWarning()
            // Dispose of any resources that can be recreated.
        }
    
    }
    
    3 Answers  |  active 7 years ago
        1
  •  5
  •   Pranavan SP    5 years ago

    Swift 3

    I found a solution using AVFoundation that creates square face tracking in real time on iOS, and I have modified some of the code here.

    import UIKit
    import AVFoundation
    
    class DetailsView: UIView {
        func setup() {
            layer.borderColor = UIColor.red.withAlphaComponent(0.7).cgColor
            layer.borderWidth = 5.0
        }
    }
    
    
    class ViewController: UIViewController {
    
        let stillImageOutput = AVCaptureStillImageOutput()

        var session: AVCaptureSession?
        var borderLayer: CAShapeLayer?
    
        let detailsView: DetailsView = {
            let detailsView = DetailsView()
            detailsView.setup()
    
            return detailsView
        }()
    
        lazy var previewLayer: AVCaptureVideoPreviewLayer? = {
            var previewLay = AVCaptureVideoPreviewLayer(session: self.session!)
            previewLay?.videoGravity = AVLayerVideoGravityResizeAspectFill
    
            return previewLay
        }()
    
        lazy var frontCamera: AVCaptureDevice? = {
            guard let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] else { return nil }
    
            return devices.filter { $0.position == .front }.first
        }()
    
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy : CIDetectorAccuracyLow])
    
        override func viewDidLayoutSubviews() {
            super.viewDidLayoutSubviews()
            previewLayer?.frame = view.frame
        }
    
        override func viewDidAppear(_ animated: Bool) {
            super.viewDidAppear(animated)
            guard let previewLayer = previewLayer else { return }
    
            view.layer.addSublayer(previewLayer)
            view.addSubview(detailsView)
            view.bringSubview(toFront: detailsView)
        }
    
        override func viewDidLoad() {
            super.viewDidLoad()
            sessionPrepare()
            session?.startRunning()
        }
        // Function to store the captured image
        func saveToCamera() {
    
            if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
    
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) in
                if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer) {
    
                        if let cameraImage = UIImage(data: imageData) {
    
                            UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
                        }
                    }
                })
            }
        }
    }
    
    extension ViewController {
    
        func sessionPrepare() {
            session = AVCaptureSession()
    
            guard let session = session, let captureDevice = frontCamera else { return }
    
            session.sessionPreset = AVCaptureSessionPresetPhoto
    
    
            do {
                let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
                session.beginConfiguration()
                stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
    
                if session.canAddOutput(stillImageOutput) {
                    session.addOutput(stillImageOutput)
                }
    
                if session.canAddInput(deviceInput) {
                    session.addInput(deviceInput)
                }
    
                let output = AVCaptureVideoDataOutput()
                output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)]
    
                output.alwaysDiscardsLateVideoFrames = true
    
                if session.canAddOutput(output) {
                    session.addOutput(output)
                }
    
                session.commitConfiguration()
    
                let queue = DispatchQueue(label: "output.queue")
                output.setSampleBufferDelegate(self, queue: queue)
    
            } catch {
                print("error with creating AVCaptureDeviceInput")
            }
        }
    }
    
    extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
        func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
            guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
            let ciImage = CIImage(cvImageBuffer: pixelBuffer, options: attachments as? [String : Any])
            let options: [String : Any] = [CIDetectorImageOrientation: exifOrientation(orientation: UIDevice.current.orientation),
                                           CIDetectorSmile: true,
                                           CIDetectorEyeBlink: true]
            let allFeatures = faceDetector?.features(in: ciImage, options: options)
    
            let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)
            let cleanAperture = CMVideoFormatDescriptionGetCleanAperture(formatDescription!, false)
    
            guard let features = allFeatures else { return }
    
            for feature in features {
                if let faceFeature = feature as? CIFaceFeature {
                    let faceRect = calculateFaceRect(facePosition: faceFeature.mouthPosition, faceBounds: faceFeature.bounds, clearAperture: cleanAperture)
                    update(with: faceRect)
                }
            }
    
            if features.count == 0 {
                DispatchQueue.main.async {
                    self.detailsView.alpha = 0.0
                }
            }
    
        }
    
        // Map the device orientation to the EXIF orientation value CIDetector expects
        // (1 = normal, 3 = rotated 180°, 6 = rotated 90° CW, 8 = rotated 90° CCW)
        func exifOrientation(orientation: UIDeviceOrientation) -> Int {
            switch orientation {
            case .portraitUpsideDown:
                return 8
            case .landscapeLeft:
                return 3
            case .landscapeRight:
                return 1
            default:
                return 6
            }
        }
    
        // Fit the clean-aperture size into the view and return the resulting video rect
        func videoBox(frameSize: CGSize, apertureSize: CGSize) -> CGRect {
            let apertureRatio = apertureSize.height / apertureSize.width
            let viewRatio = frameSize.width / frameSize.height
    
            var size = CGSize.zero
    
            if (viewRatio > apertureRatio) {
                size.width = frameSize.width
                size.height = apertureSize.width * (frameSize.width / apertureSize.height)
            } else {
                size.width = apertureSize.height * (frameSize.height / apertureSize.width)
                size.height = frameSize.height
            }
    
            var videoBox = CGRect(origin: .zero, size: size)
    
            if (size.width < frameSize.width) {
                videoBox.origin.x = (frameSize.width - size.width) / 2.0
            } else {
                videoBox.origin.x = (size.width - frameSize.width) / 2.0
            }
    
            if (size.height < frameSize.height) {
                videoBox.origin.y = (frameSize.height - size.height) / 2.0
            } else {
                videoBox.origin.y = (size.height - frameSize.height) / 2.0
            }
    
            return videoBox
        }
    
        // Convert a face rect from the detector's landscape coordinates into preview-layer coordinates
        func calculateFaceRect(facePosition: CGPoint, faceBounds: CGRect, clearAperture: CGRect) -> CGRect {
            let parentFrameSize = previewLayer!.frame.size
            let previewBox = videoBox(frameSize: parentFrameSize, apertureSize: clearAperture.size)
    
            var faceRect = faceBounds
    
            swap(&faceRect.size.width, &faceRect.size.height)
            swap(&faceRect.origin.x, &faceRect.origin.y)
    
            let widthScaleBy = previewBox.size.width / clearAperture.size.height
            let heightScaleBy = previewBox.size.height / clearAperture.size.width
    
            faceRect.size.width *= widthScaleBy
            faceRect.size.height *= heightScaleBy
            faceRect.origin.x *= widthScaleBy
            faceRect.origin.y *= heightScaleBy
    
            faceRect = faceRect.offsetBy(dx: 0.0, dy: previewBox.origin.y)
            let frame = CGRect(x: parentFrameSize.width - faceRect.origin.x - faceRect.size.width - previewBox.origin.x / 2.0, y: faceRect.origin.y, width: faceRect.width, height: faceRect.height)
    
            return frame
        }
    
    }
    extension ViewController {
        func update(with faceRect: CGRect) {
            DispatchQueue.main.async {
                UIView.animate(withDuration: 0.2) {
                    self.detailsView.alpha = 1.0
                    self.detailsView.frame = faceRect
                }
            }
        }
    }
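
    A note on the APIs above: AVCaptureStillImageOutput has been deprecated since iOS 10. A minimal sketch of the same save-to-library capture with its replacement, AVCapturePhotoOutput, might look like this (PhotoCaptureHelper is a hypothetical wrapper, not part of the original answer; fileDataRepresentation() requires iOS 11+):

    import UIKit
    import AVFoundation

    class PhotoCaptureHelper: NSObject, AVCapturePhotoCaptureDelegate {

        let photoOutput = AVCapturePhotoOutput()

        func attach(to session: AVCaptureSession) {
            if session.canAddOutput(photoOutput) {
                session.addOutput(photoOutput)
            }
        }

        func capture() {
            // AVCapturePhotoSettings must be a fresh instance for every capture
            let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
            photoOutput.capturePhoto(with: settings, delegate: self)
        }

        // Called once the JPEG data is ready; save it to the photo library as before
        func photoOutput(_ output: AVCapturePhotoOutput,
                         didFinishProcessingPhoto photo: AVCapturePhoto,
                         error: Error?) {
            guard error == nil,
                  let data = photo.fileDataRepresentation(),
                  let image = UIImage(data: data) else { return }
            UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
        }
    }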
    

    Swift 4

    Apple's own Vision framework can detect faces in real time starting with Swift 4 (iOS 11). Click the link for the documentation and a sample application.
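
    For reference, a minimal sketch of what single-frame face detection with Vision looks like (detectFaces is a hypothetical helper you would call from the capture delegate; error handling is trimmed):

    import Vision
    import CoreMedia

    // Minimal sketch (iOS 11+): detect face bounding boxes in one captured frame.
    func detectFaces(in sampleBuffer: CMSampleBuffer) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        let request = VNDetectFaceRectanglesRequest { request, _ in
            guard let faces = request.results as? [VNFaceObservation] else { return }
            // boundingBox is normalized (0...1) with a lower-left origin and still
            // needs converting to view coordinates before drawing an overlay.
            for face in faces {
                print("Detected face at \(face.boundingBox)")
            }
        }

        // .leftMirrored matches a portrait, front-camera feed
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                                            orientation: .leftMirrored,
                                            options: [:])
        try? handler.perform([request])
    }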

        2
  •  0
  •   Giorgio Tempesta    6 years ago

    For anyone looking for a working and up-to-date example, Apple's own sample is the best I have found so far: https://developer.apple.com/documentation/vision/tracking_the_user_s_face_in_real_time

    You have to change two lines: in AppDelegate.swift you need to change line 15 to

    func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplicationLaunchOptionsKey: Any]?) -> Bool {
    

    and in ViewController.swift you need to change line 462 to

    let cameraIntrinsicData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil)
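
    Both of those are the pre-Swift 4.2 spellings. If you build Apple's sample against a newer SDK, the same two lines use the renamed APIs instead; a sketch, to be verified against your SDK version:

    // AppDelegate.swift: UIApplicationLaunchOptionsKey became UIApplication.LaunchOptionsKey
    func application(_ application: UIApplication,
                     didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
        return true
    }

    // ViewController.swift: CMGetAttachment gained argument labels
    let cameraIntrinsicData = CMGetAttachment(sampleBuffer,
                                              key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix,
                                              attachmentModeOut: nil)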
    
        3
  •  0
  •   Vasily Bodnarchuk    5 years ago

    Based on this article

    • Xcode 10.2.1 (10E1001), Swift 5

    This solution allows you to:

    • check camera access
    • show an alert with a link to the app's Settings page when camera access is denied
    • highlight the detected face (face rectangle + eyebrows, lips, ...)

    Solution

    import UIKit
    import AVFoundation
    import Vision
    
    class FaceDetectionService: NSObject {
    
        private weak var previewView: UIView?
        private weak var faceView: FaceView?
        private var cameraIsReadyToUse = false
        private let session = AVCaptureSession()
        private lazy var cameraPosition = AVCaptureDevice.Position.front
        private weak var previewLayer: AVCaptureVideoPreviewLayer?
        private lazy var sequenceHandler = VNSequenceRequestHandler()
        private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
                                                         qos: .userInitiated, attributes: [],
                                                         autoreleaseFrequency: .workItem)
        private var preparingCompletionHandler: ((Bool) -> Void)?
    
        func prepare(previewView: UIView,
                     cameraPosition: AVCaptureDevice.Position,
                     completion: ((Bool) -> Void)?) {
            self.previewView = previewView
            self.preparingCompletionHandler = completion
            self.cameraPosition = cameraPosition
            checkCameraAccess { allowed in
                if allowed { self.setup() }
                completion?(allowed)
                self.preparingCompletionHandler = nil
            }
        }
    
        private func setup() {
            guard let bounds = previewView?.bounds else { return }
            let faceView = FaceView(frame: bounds)
            previewView?.addSubview(faceView)
            faceView.backgroundColor = .clear
            self.faceView = faceView
            configureCaptureSession()
        }
        func start() { if cameraIsReadyToUse { session.startRunning() } }
        func stop() { session.stopRunning() }
    }
    
    extension FaceDetectionService {
    
        private func askUserForCameraPermission(_ completion:  ((Bool) -> Void)?) {
            AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
                DispatchQueue.main.async { completion?(allowedAccess) }
            }
        }
    
        private func checkCameraAccess(completion: ((Bool) -> Void)?) {
            askUserForCameraPermission { [weak self] allowed in
                guard let self = self, let completion = completion else { return }
                self.cameraIsReadyToUse = allowed
                if allowed {
                    completion(true)
                } else {
                    self.showDisabledCameraAlert(completion: completion)
                }
            }
        }
    
        private func configureCaptureSession() {
            guard let previewView = previewView else { return }
            // Define the capture device we want to use
    
            guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
                let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey : "No front camera available"])
                show(error: error)
                return
            }
    
            // Connect the camera to the capture session input
            do {
    
                try camera.lockForConfiguration()
                defer { camera.unlockForConfiguration() }
    
                if camera.isFocusModeSupported(.continuousAutoFocus) {
                    camera.focusMode = .continuousAutoFocus
                }
    
                if camera.isExposureModeSupported(.continuousAutoExposure) {
                    camera.exposureMode = .continuousAutoExposure
                }
    
                let cameraInput = try AVCaptureDeviceInput(device: camera)
                session.addInput(cameraInput)
    
            } catch {
                show(error: error as NSError)
                return
            }
    
            // Create the video data output
            let videoOutput = AVCaptureVideoDataOutput()
            videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
            videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    
            // Add the video output to the capture session
            session.addOutput(videoOutput)
    
            let videoConnection = videoOutput.connection(with: .video)
            videoConnection?.videoOrientation = .portrait
    
            // Configure the preview layer
            let previewLayer = AVCaptureVideoPreviewLayer(session: session)
            previewLayer.videoGravity = .resizeAspectFill
            previewLayer.frame = previewView.bounds
            previewView.layer.insertSublayer(previewLayer, at: 0)
            self.previewLayer = previewLayer
        }
    }
    
    extension FaceDetectionService: AVCaptureVideoDataOutputSampleBufferDelegate {
        func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            let detectFaceRequest = VNDetectFaceLandmarksRequest(completionHandler: detectedFace)
            do {
                try sequenceHandler.perform(
                    [detectFaceRequest],
                    on: imageBuffer,
                    orientation: .leftMirrored)
            } catch { show(error: error as NSError) }
        }
    }
    
    extension FaceDetectionService {
        private func detectedFace(request: VNRequest, error: Error?) {
            guard   let previewLayer = previewLayer,
                    let results = request.results as? [VNFaceObservation],
                    let result = results.first
                    else { faceView?.clearAndSetNeedsDisplay(); return }
            faceView?.read(result: result, previewLayer: previewLayer)
        }
    }
    
    // Navigation
    
    extension FaceDetectionService {
    
        private func show(alert: UIAlertController) {
            DispatchQueue.main.async {
                UIApplication.topViewController?.present(alert, animated: true, completion: nil)
            }
        }
    
        private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
            let alertVC = UIAlertController(title: "Enable Camera Access",
                                            message: "Please provide access to your camera",
                                            preferredStyle: .alert)
            alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
                guard   let previewView = self.previewView,
                    let settingsUrl = URL(string: UIApplication.openSettingsURLString),
                    UIApplication.shared.canOpenURL(settingsUrl) else { return }
                UIApplication.shared.open(settingsUrl) { [weak self] _ in
                    guard let self = self else { return }
                    self.prepare(previewView: previewView,
                                 cameraPosition: self.cameraPosition,
                                 completion: self.preparingCompletionHandler)
                }
            }))
            alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
            show(alert: alertVC)
        }
    
        private func show(error: NSError) {
            let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
            alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil ))
            show(alert: alertVC)
        }
    }
    

    FaceView

    import UIKit
    import Vision
    import AVFoundation
    
    struct FaceElement {
        let points: [CGPoint]
        let needToClosePath: Bool
    
        func draw(in context: CGContext) {
            if points.isEmpty { return }
            context.addLines(between: points)
            if needToClosePath { context.closePath() }
            context.strokePath()
        }
    }
    
    class FaceView: UIView {
        private var faceElements = [FaceElement]()
        private var boundingBox = CGRect.zero
    
        func clearAndSetNeedsDisplay() {
            faceElements = []
            boundingBox = .zero
            DispatchQueue.main.async { [weak self] in self?.setNeedsDisplay() }
        }
    
        private func drawElement(context: CGContext, points: [CGPoint], needToClosePath: Bool) {
            if !points.isEmpty {
                context.addLines(between: points)
                if needToClosePath { context.closePath() }
                context.strokePath()
            }
        }
    
        override func draw(_ rect: CGRect) {
            super.draw(rect)
            guard let context = UIGraphicsGetCurrentContext() else { return }
            context.saveGState()
            defer { context.restoreGState()}
    
            context.addRect(boundingBox)
            UIColor.red.setStroke()
            context.strokePath()
    
            UIColor.white.setStroke()
            faceElements.forEach { $0.draw(in: context) }
        }
    
        func read(result: VNFaceObservation, previewLayer: AVCaptureVideoPreviewLayer) {
            defer { DispatchQueue.main.async { [weak self] in self?.setNeedsDisplay() } }
    
            let rect = result.boundingBox
            let origin = previewLayer.layerPointConverted(fromCaptureDevicePoint: rect.origin)
            let size = previewLayer.layerPointConverted(fromCaptureDevicePoint: rect.size.cgPoint).cgSize
            boundingBox = CGRect(origin: origin, size: size)
    
            func addFaceElement(from landmark: VNFaceLandmarkRegion2D?, needToClosePath: Bool) {
                guard let normalizedPoints = landmark?.normalizedPoints else { return }
                let points = normalizedPoints.compactMap { point -> CGPoint in
                    let absolute = point.absolutePoint(in: result.boundingBox)
                    let converted = previewLayer.layerPointConverted(fromCaptureDevicePoint: absolute)
                    return converted
                }
                faceElements.append(FaceElement(points: points, needToClosePath: needToClosePath))
            }
    
            guard let landmarks = result.landmarks else { return }
            faceElements = []
            addFaceElement(from: landmarks.leftEye, needToClosePath: true)
            addFaceElement(from: landmarks.rightEye, needToClosePath: true)
            addFaceElement(from: landmarks.leftEyebrow, needToClosePath: false)
            addFaceElement(from: landmarks.rightEyebrow, needToClosePath: false)
            addFaceElement(from: landmarks.nose, needToClosePath: false)
            addFaceElement(from: landmarks.outerLips, needToClosePath: true)
            addFaceElement(from: landmarks.innerLips, needToClosePath: true)
            addFaceElement(from: landmarks.faceContour, needToClosePath: false)
        }
    }
    

    Helpers

    import CoreGraphics
    
    func + (left: CGPoint, right: CGPoint) -> CGPoint {
        return CGPoint(x: left.x + right.x, y: left.y + right.y)
    }
    
    extension CGSize {
        var cgPoint: CGPoint { return CGPoint(x: width, y: height) }
    }
    
    extension CGPoint {
        var cgSize: CGSize { return CGSize(width: x, height: y) }
    
        func absolutePoint(in rect: CGRect) -> CGPoint {
            return CGPoint(x: x * rect.size.width, y: y * rect.size.height) + rect.origin
        }
    }
    
    import UIKit
    
    extension UIApplication {
        private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
            if let navigationController = controller as? UINavigationController {
                return topViewController(controller: navigationController.visibleViewController)
            }
            if let tabController = controller as? UITabBarController {
                if let selected = tabController.selectedViewController {
                    return topViewController(controller: selected)
                }
            }
            if let presented = controller?.presentedViewController {
                return topViewController(controller: presented)
            }
            return controller
        }
    
        class var topViewController: UIViewController? { return topViewController() }
    }
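
    One caveat not in the original answer: UIApplication.shared.keyWindow is deprecated as of iOS 13. A sketch of an alternative for newer deployment targets (firstKeyWindow is a hypothetical name):

    import UIKit

    extension UIApplication {
        // Find the key window without the deprecated keyWindow property (iOS 13+)
        var firstKeyWindow: UIWindow? {
            return windows.first { $0.isKeyWindow }
        }
    }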
    

    Usage

    private lazy var faceDetectionService = FaceDetectionService()
    
    //....
    
    faceDetectionService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] _ in
        self?.faceDetectionService.start()
    }
    

    import UIKit
    
    class ViewController: UIViewController {
    
        private lazy var faceDetectionService = FaceDetectionService()
        private weak var previewView: UIView!
    
        override func viewDidLoad() {
            super.viewDidLoad()
            // Do any additional setup after loading the view.
            let previewView = UIView(frame: .zero)
            view.addSubview(previewView)
            previewView.translatesAutoresizingMaskIntoConstraints = false
            previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
            previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
            previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
            previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
            previewView.layoutIfNeeded()
            self.previewView = previewView
    
            faceDetectionService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] _ in
                self?.faceDetectionService.start()
            }
        }
    
        // Ensure that the interface stays locked in Portrait.
        override var supportedInterfaceOrientations: UIInterfaceOrientationMask { return .portrait }
    
        // Ensure that the interface stays locked in Portrait.
        override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation { return .portrait }
    }
    

    More information