Various fixes

Ilya Laktyushin 2023-07-15 17:12:16 +02:00
parent 547564eac3
commit 4f1913e37d
6 changed files with 23 additions and 354 deletions

View File

@@ -1,170 +0,0 @@
import Foundation
import CoreImage
import CoreMedia
import CoreVideo
import Metal

protocol CameraFilter: AnyObject {
    var isPrepared: Bool { get }
    
    func prepare(with inputFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int)
    func reset()
    
    var outputFormatDescription: CMFormatDescription? { get }
    var inputFormatDescription: CMFormatDescription? { get }
    
    func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer?
}

func allocateOutputBufferPool(with inputFormatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) -> (
    outputBufferPool: CVPixelBufferPool?,
    outputColorSpace: CGColorSpace?,
    outputFormatDescription: CMFormatDescription?) {
    let inputMediaSubType = CMFormatDescriptionGetMediaSubType(inputFormatDescription)
    if inputMediaSubType != kCVPixelFormatType_32BGRA {
        assertionFailure("Invalid input pixel buffer type \(inputMediaSubType)")
        return (nil, nil, nil)
    }
    
    let inputDimensions = CMVideoFormatDescriptionGetDimensions(inputFormatDescription)
    var pixelBufferAttributes: [String: Any] = [
        kCVPixelBufferPixelFormatTypeKey as String: UInt(inputMediaSubType),
        kCVPixelBufferWidthKey as String: Int(inputDimensions.width),
        kCVPixelBufferHeightKey as String: Int(inputDimensions.height),
        kCVPixelBufferIOSurfacePropertiesKey as String: [:] as NSDictionary
    ]
    
    var cgColorSpace = CGColorSpaceCreateDeviceRGB()
    if let inputFormatDescriptionExtension = CMFormatDescriptionGetExtensions(inputFormatDescription) as Dictionary? {
        let colorPrimaries = inputFormatDescriptionExtension[kCVImageBufferColorPrimariesKey]
        if let colorPrimaries = colorPrimaries {
            var colorSpaceProperties: [String: AnyObject] = [kCVImageBufferColorPrimariesKey as String: colorPrimaries]
            if let yCbCrMatrix = inputFormatDescriptionExtension[kCVImageBufferYCbCrMatrixKey] {
                colorSpaceProperties[kCVImageBufferYCbCrMatrixKey as String] = yCbCrMatrix
            }
            if let transferFunction = inputFormatDescriptionExtension[kCVImageBufferTransferFunctionKey] {
                colorSpaceProperties[kCVImageBufferTransferFunctionKey as String] = transferFunction
            }
            pixelBufferAttributes[kCVBufferPropagatedAttachmentsKey as String] = colorSpaceProperties
        }
        
        if let cvColorspace = inputFormatDescriptionExtension[kCVImageBufferCGColorSpaceKey] {
            cgColorSpace = cvColorspace as! CGColorSpace
        } else if (colorPrimaries as? String) == (kCVImageBufferColorPrimaries_P3_D65 as String) {
            cgColorSpace = CGColorSpace(name: CGColorSpace.displayP3)!
        }
    }
    
    let poolAttributes = [kCVPixelBufferPoolMinimumBufferCountKey as String: outputRetainedBufferCountHint]
    var cvPixelBufferPool: CVPixelBufferPool?
    CVPixelBufferPoolCreate(kCFAllocatorDefault, poolAttributes as NSDictionary?, pixelBufferAttributes as NSDictionary?, &cvPixelBufferPool)
    guard let pixelBufferPool = cvPixelBufferPool else {
        assertionFailure("Allocation failure: Could not allocate pixel buffer pool.")
        return (nil, nil, nil)
    }
    
    preallocateBuffers(pool: pixelBufferPool, allocationThreshold: outputRetainedBufferCountHint)
    
    // Get the output format description.
    var pixelBuffer: CVPixelBuffer?
    var outputFormatDescription: CMFormatDescription?
    let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: outputRetainedBufferCountHint] as NSDictionary
    CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pixelBufferPool, auxAttributes, &pixelBuffer)
    if let pixelBuffer = pixelBuffer {
        CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault,
                                                     imageBuffer: pixelBuffer,
                                                     formatDescriptionOut: &outputFormatDescription)
    }
    pixelBuffer = nil
    
    return (pixelBufferPool, cgColorSpace, outputFormatDescription)
}

private func preallocateBuffers(pool: CVPixelBufferPool, allocationThreshold: Int) {
    var pixelBuffers = [CVPixelBuffer]()
    var error: CVReturn = kCVReturnSuccess
    let auxAttributes = [kCVPixelBufferPoolAllocationThresholdKey as String: allocationThreshold] as NSDictionary
    var pixelBuffer: CVPixelBuffer?
    while error == kCVReturnSuccess {
        error = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(kCFAllocatorDefault, pool, auxAttributes, &pixelBuffer)
        if let pixelBuffer = pixelBuffer {
            pixelBuffers.append(pixelBuffer)
        }
        pixelBuffer = nil
    }
    pixelBuffers.removeAll()
}

class CameraTestFilter: CameraFilter {
    var isPrepared = false
    
    private var ciContext: CIContext?
    private var rosyFilter: CIFilter?
    private var outputColorSpace: CGColorSpace?
    private var outputPixelBufferPool: CVPixelBufferPool?
    private(set) var outputFormatDescription: CMFormatDescription?
    private(set) var inputFormatDescription: CMFormatDescription?
    
    func prepare(with formatDescription: CMFormatDescription, outputRetainedBufferCountHint: Int) {
        reset()
        
        (outputPixelBufferPool,
         outputColorSpace,
         outputFormatDescription) = allocateOutputBufferPool(with: formatDescription,
                                                             outputRetainedBufferCountHint: outputRetainedBufferCountHint)
        if outputPixelBufferPool == nil {
            return
        }
        inputFormatDescription = formatDescription
        ciContext = CIContext()
        
        rosyFilter = CIFilter(name: "CIColorControls")
        rosyFilter!.setValue(0.0, forKey: kCIInputBrightnessKey)
        rosyFilter!.setValue(0.0, forKey: kCIInputSaturationKey)
        rosyFilter!.setValue(1.1, forKey: kCIInputContrastKey)
        
        isPrepared = true
    }
    
    func reset() {
        ciContext = nil
        rosyFilter = nil
        outputColorSpace = nil
        outputPixelBufferPool = nil
        outputFormatDescription = nil
        inputFormatDescription = nil
        isPrepared = false
    }
    
    func render(pixelBuffer: CVPixelBuffer) -> CVPixelBuffer? {
        guard let rosyFilter = rosyFilter,
              let ciContext = ciContext,
              isPrepared else {
            return nil
        }
        
        let sourceImage = CIImage(cvImageBuffer: pixelBuffer)
        rosyFilter.setValue(sourceImage, forKey: kCIInputImageKey)
        
        guard let filteredImage = rosyFilter.value(forKey: kCIOutputImageKey) as? CIImage else {
            return nil
        }
        
        var pbuf: CVPixelBuffer?
        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &pbuf)
        guard let outputPixelBuffer = pbuf else {
            return nil
        }
        
        ciContext.render(filteredImage, to: outputPixelBuffer, bounds: filteredImage.extent, colorSpace: outputColorSpace)
        return outputPixelBuffer
    }
}
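
For context, a minimal sketch of how a CameraFilter conformer such as CameraTestFilter is typically driven per frame from a capture callback; the caller below is an assumption for illustration and not code from this repository:

import AVFoundation
import CoreMedia

// Hypothetical caller: frames arrive via AVCaptureVideoDataOutputSampleBufferDelegate.
let testFilter = CameraTestFilter()

func filteredBuffer(for sampleBuffer: CMSampleBuffer) -> CVPixelBuffer? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
          let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer) else {
        return nil
    }
    // Allocate the output buffer pool lazily when the first frame arrives.
    if !testFilter.isPrepared {
        testFilter.prepare(with: formatDescription, outputRetainedBufferCountHint: 3)
    }
    // render(pixelBuffer:) draws an output buffer from the pool and returns nil on failure.
    return testFilter.render(pixelBuffer: pixelBuffer)
}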

View File

@@ -261,7 +261,7 @@ final class CameraOutput: NSObject {
        }
        let uniqueId = settings.uniqueID
        let photoCapture = PhotoCaptureContext(settings: settings, filter: nil, mirror: mirror)
        let photoCapture = PhotoCaptureContext(settings: settings, orientation: orientation, mirror: mirror)
        self.photoCaptureRequests[uniqueId] = photoCapture
        self.photoOutput.capturePhoto(with: settings, delegate: photoCapture)

View File

@@ -1,163 +0,0 @@
import Foundation
import AVKit
import Vision

final class FaceLandmarksDataOutput {
    private var ciContext: CIContext?
    
    private var detectionRequests: [VNDetectFaceRectanglesRequest]?
    private var trackingRequests: [VNTrackObjectRequest]?
    
    lazy var sequenceRequestHandler = VNSequenceRequestHandler()
    
    var outputFaceObservations: (([VNFaceObservation]) -> Void)?
    
    private var outputColorSpace: CGColorSpace?
    private var outputPixelBufferPool: CVPixelBufferPool?
    private(set) var outputFormatDescription: CMFormatDescription?
    
    init() {
        self.ciContext = CIContext()
        self.prepareVisionRequest()
    }
    
    fileprivate func prepareVisionRequest() {
        var requests = [VNTrackObjectRequest]()
        let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
            if error != nil {
                print("FaceDetection error: \(String(describing: error)).")
            }
            guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest, let results = faceDetectionRequest.results else {
                return
            }
            DispatchQueue.main.async {
                for observation in results {
                    let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
                    requests.append(faceTrackingRequest)
                }
                self.trackingRequests = requests
            }
        })
        self.detectionRequests = [faceDetectionRequest]
        self.sequenceRequestHandler = VNSequenceRequestHandler()
    }
    
    func exifOrientationForCurrentDeviceOrientation() -> CGImagePropertyOrientation {
        return exifOrientationForDeviceOrientation(UIDevice.current.orientation)
    }
    func process(sampleBuffer: CMSampleBuffer) {
        var requestHandlerOptions: [VNImageOption: AnyObject] = [:]
        
        let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil)
        if cameraIntrinsicData != nil {
            requestHandlerOptions[VNImageOption.cameraIntrinsics] = cameraIntrinsicData
        }
        
        guard let inputPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            print("Failed to obtain a CVPixelBuffer for the current output frame.")
            return
        }
        
        let width = CGFloat(CVPixelBufferGetWidth(inputPixelBuffer))
        let height = CGFloat(CVPixelBufferGetHeight(inputPixelBuffer))
        
        if #available(iOS 13.0, *), outputPixelBufferPool == nil, let formatDescription = try? CMFormatDescription(videoCodecType: .pixelFormat_32BGRA, width: Int(width / 3.0), height: Int(height / 3.0)) {
            (outputPixelBufferPool,
             outputColorSpace,
             outputFormatDescription) = allocateOutputBufferPool(with: formatDescription, outputRetainedBufferCountHint: 3)
        }
        
        var pbuf: CVPixelBuffer?
        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, outputPixelBufferPool!, &pbuf)
        guard let pixelBuffer = pbuf, let ciContext = self.ciContext else {
            print("Allocation failure")
            return
        }
        
        resizePixelBuffer(inputPixelBuffer, width: Int(width / 3.0), height: Int(height / 3.0), output: pixelBuffer, context: ciContext)
        
        let exifOrientation = self.exifOrientationForCurrentDeviceOrientation()
        
        guard let requests = self.trackingRequests, !requests.isEmpty else {
            let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: requestHandlerOptions)
            do {
                guard let detectRequests = self.detectionRequests else {
                    return
                }
                try imageRequestHandler.perform(detectRequests)
            } catch let error as NSError {
                print("Failed to perform FaceRectangleRequest: \(String(describing: error)).")
            }
            return
        }
        
        do {
            try self.sequenceRequestHandler.perform(requests, on: pixelBuffer, orientation: exifOrientation)
        } catch let error as NSError {
            print("Failed to perform SequenceRequest: \(String(describing: error)).")
        }
        
        var newTrackingRequests = [VNTrackObjectRequest]()
        for trackingRequest in requests {
            guard let results = trackingRequest.results else {
                return
            }
            guard let observation = results[0] as? VNDetectedObjectObservation else {
                return
            }
            if !trackingRequest.isLastFrame {
                if observation.confidence > 0.3 {
                    trackingRequest.inputObservation = observation
                } else {
                    trackingRequest.isLastFrame = true
                }
                newTrackingRequests.append(trackingRequest)
            }
        }
        self.trackingRequests = newTrackingRequests
        
        if newTrackingRequests.isEmpty {
            DispatchQueue.main.async {
                self.outputFaceObservations?([])
            }
            return
        }
        
        var faceLandmarkRequests = [VNDetectFaceLandmarksRequest]()
        for trackingRequest in newTrackingRequests {
            let faceLandmarksRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request, error) in
                if error != nil {
                    print("FaceLandmarks error: \(String(describing: error)).")
                }
                guard let landmarksRequest = request as? VNDetectFaceLandmarksRequest, let results = landmarksRequest.results else {
                    return
                }
                DispatchQueue.main.async {
                    self.outputFaceObservations?(results)
                }
            })
            
            guard let trackingResults = trackingRequest.results else {
                return
            }
            guard let observation = trackingResults[0] as? VNDetectedObjectObservation else {
                return
            }
            let faceObservation = VNFaceObservation(boundingBox: observation.boundingBox)
            faceLandmarksRequest.inputFaceObservations = [faceObservation]
            faceLandmarkRequests.append(faceLandmarksRequest)
            
            let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: requestHandlerOptions)
            do {
                try imageRequestHandler.perform(faceLandmarkRequests)
            } catch let error as NSError {
                print("Failed to perform FaceLandmarkRequest: \(String(describing: error)).")
            }
        }
    }
}
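
For reference, a minimal sketch of how this output class is typically driven; the consumer class and the delegate wiring below are assumptions for illustration, not code from this repository:

import AVFoundation
import Vision

// Hypothetical consumer that feeds capture frames into FaceLandmarksDataOutput.
final class FaceTrackingConsumer: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let faceOutput = FaceLandmarksDataOutput()

    override init() {
        super.init()
        // Landmark observations are delivered on the main queue by process(sampleBuffer:).
        self.faceOutput.outputFaceObservations = { observations in
            print("Tracked \(observations.count) face(s)")
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Downscales the frame, then runs detection, tracking and landmark requests.
        self.faceOutput.process(sampleBuffer: sampleBuffer)
    }
}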

View File

@@ -34,11 +34,11 @@ public enum PhotoCaptureResult: Equatable {
final class PhotoCaptureContext: NSObject, AVCapturePhotoCaptureDelegate {
    private let pipe = ValuePipe<PhotoCaptureResult>()
    private let filter: CameraFilter?
    private let orientation: AVCaptureVideoOrientation
    private let mirror: Bool
    init(settings: AVCapturePhotoSettings, filter: CameraFilter?, mirror: Bool) {
        self.filter = filter
    init(settings: AVCapturePhotoSettings, orientation: AVCaptureVideoOrientation, mirror: Bool) {
        self.orientation = orientation
        self.mirror = mirror
        super.init()
@@ -60,25 +60,20 @@ final class PhotoCaptureContext: NSObject, AVCapturePhotoCaptureDelegate {
        var photoFormatDescription: CMFormatDescription?
        CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: photoPixelBuffer, formatDescriptionOut: &photoFormatDescription)
        var finalPixelBuffer = photoPixelBuffer
        if let filter = self.filter {
            if !filter.isPrepared {
                if let unwrappedPhotoFormatDescription = photoFormatDescription {
                    filter.prepare(with: unwrappedPhotoFormatDescription, outputRetainedBufferCountHint: 2)
                }
            }
            guard let filteredPixelBuffer = filter.render(pixelBuffer: finalPixelBuffer) else {
                print("Unable to filter photo buffer")
                return
            }
            finalPixelBuffer = filteredPixelBuffer
        }
        var orientation: UIImage.Orientation = .right
        if self.orientation == .landscapeLeft {
            orientation = .down
        } else if self.orientation == .landscapeRight {
            orientation = .up
        } else if self.orientation == .portraitUpsideDown {
            orientation = .left
        }
        let finalPixelBuffer = photoPixelBuffer
        let ciContext = CIContext()
        let renderedCIImage = CIImage(cvImageBuffer: finalPixelBuffer)
        if let cgImage = ciContext.createCGImage(renderedCIImage, from: renderedCIImage.extent) {
            var image = UIImage(cgImage: cgImage, scale: 1.0, orientation: .right)
            var image = UIImage(cgImage: cgImage, scale: 1.0, orientation: orientation)
            if image.imageOrientation != .up {
                UIGraphicsBeginImageContextWithOptions(image.size, true, image.scale)
                if self.mirror, let context = UIGraphicsGetCurrentContext() {
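
The replacement logic maps the capture connection's video orientation onto a UIImage orientation before re-rendering the photo; expressed as a standalone helper (hypothetical, for illustration only), the mapping is:

import AVFoundation
import UIKit

// .right is the default because the camera sensor's native buffer is landscape
// and is displayed rotated when shooting in portrait.
func imageOrientation(for videoOrientation: AVCaptureVideoOrientation) -> UIImage.Orientation {
    switch videoOrientation {
    case .landscapeLeft:
        return .down
    case .landscapeRight:
        return .up
    case .portraitUpsideDown:
        return .left
    default:
        return .right
    }
}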

View File

@@ -3799,7 +3799,7 @@ public final class EmojiPagerContentComponent: Component {
            }
        }
        contextGesture.activatedAfterCompletion = { [weak self] point, wasTap in
            guard let self, let component = self.component, !self.isSearchActivated else {
            guard let self, let component = self.component else {
                return
            }

View File

@@ -1850,10 +1850,12 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
            controller.state.privacy = privacy
        }
        var isFromCamera = false
        let isSavingAvailable: Bool
        switch subject {
        case .image, .video:
            isSavingAvailable = !controller.isEditingStory
            isFromCamera = true
        case .draft:
            isSavingAvailable = true
        default:
@@ -1872,12 +1874,17 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
        } else {
            mediaEntity.scale = storyDimensions.width / fittedSize.width
        }
        self.entitiesView.add(mediaEntity, announce: false)
        let initialPosition = mediaEntity.position
        let initialScale = mediaEntity.scale
        let initialRotation = mediaEntity.rotation
        if isFromCamera && mediaDimensions.width > mediaDimensions.height {
            mediaEntity.scale = storyDimensions.height / fittedSize.height
        }
        self.entitiesView.add(mediaEntity, announce: false)
        if let entityView = self.entitiesView.getView(for: mediaEntity.uuid) as? DrawingMediaEntityView {
            self.entitiesView.sendSubviewToBack(entityView)
            entityView.previewView = self.previewView
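
The scale handling above aspect-fills the media entity into the story canvas: the branch shown fits by width, while landscape media coming from the camera is fitted by height instead. A simplified, illustrative reduction of that selection (the function and parameter names are assumptions, not code from this repository):

import CoreGraphics

// storyDimensions is the fixed story canvas size used by the editor.
func mediaEntityScale(mediaDimensions: CGSize, fittedSize: CGSize, storyDimensions: CGSize, isFromCamera: Bool) -> CGFloat {
    if isFromCamera && mediaDimensions.width > mediaDimensions.height {
        // Landscape camera capture: fill the canvas vertically.
        return storyDimensions.height / fittedSize.height
    } else {
        // Default: fill the canvas horizontally.
        return storyDimensions.width / fittedSize.width
    }
}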