├── .gitignore ├── LICENSE ├── README.md ├── index.js ├── ios └── RCTLFLiveKit │ ├── LFLiveKit.xcodeproj │ ├── project.pbxproj │ ├── xcshareddata │ │ └── xcschemes │ │ │ └── LFLiveKit.xcscheme │ └── xcuserdata │ │ └── onbing.xcuserdatad │ │ └── xcschemes │ │ └── xcschememanagement.plist │ ├── LFLiveKit │ ├── Info.plist │ ├── LFLiveKit.h │ ├── LFLiveSession.h │ ├── LFLiveSession.m │ ├── Vendor │ │ ├── GPUImage │ │ │ ├── GLProgram.h │ │ │ ├── GLProgram.m │ │ │ ├── GPUImage.h │ │ │ ├── GPUImage3x3ConvolutionFilter.h │ │ │ ├── GPUImage3x3ConvolutionFilter.m │ │ │ ├── GPUImage3x3TextureSamplingFilter.h │ │ │ ├── GPUImage3x3TextureSamplingFilter.m │ │ │ ├── GPUImageAdaptiveThresholdFilter.h │ │ │ ├── GPUImageAdaptiveThresholdFilter.m │ │ │ ├── GPUImageAddBlendFilter.h │ │ │ ├── GPUImageAddBlendFilter.m │ │ │ ├── GPUImageAlphaBlendFilter.h │ │ │ ├── GPUImageAlphaBlendFilter.m │ │ │ ├── GPUImageAmatorkaFilter.h │ │ │ ├── GPUImageAmatorkaFilter.m │ │ │ ├── GPUImageAverageColor.h │ │ │ ├── GPUImageAverageColor.m │ │ │ ├── GPUImageAverageLuminanceThresholdFilter.h │ │ │ ├── GPUImageAverageLuminanceThresholdFilter.m │ │ │ ├── GPUImageBilateralFilter.h │ │ │ ├── GPUImageBilateralFilter.m │ │ │ ├── GPUImageBoxBlurFilter.h │ │ │ ├── GPUImageBoxBlurFilter.m │ │ │ ├── GPUImageBrightnessFilter.h │ │ │ ├── GPUImageBrightnessFilter.m │ │ │ ├── GPUImageBuffer.h │ │ │ ├── GPUImageBuffer.m │ │ │ ├── GPUImageBulgeDistortionFilter.h │ │ │ ├── GPUImageBulgeDistortionFilter.m │ │ │ ├── GPUImageCGAColorspaceFilter.h │ │ │ ├── GPUImageCGAColorspaceFilter.m │ │ │ ├── GPUImageCannyEdgeDetectionFilter.h │ │ │ ├── GPUImageCannyEdgeDetectionFilter.m │ │ │ ├── GPUImageChromaKeyBlendFilter.h │ │ │ ├── GPUImageChromaKeyBlendFilter.m │ │ │ ├── GPUImageChromaKeyFilter.h │ │ │ ├── GPUImageChromaKeyFilter.m │ │ │ ├── GPUImageClosingFilter.h │ │ │ ├── GPUImageClosingFilter.m │ │ │ ├── GPUImageColorBlendFilter.h │ │ │ ├── GPUImageColorBlendFilter.m │ │ │ ├── GPUImageColorBurnBlendFilter.h │ │ │ ├── GPUImageColorBurnBlendFilter.m │ │ │ ├── GPUImageColorConversion.h │ │ │ ├── GPUImageColorConversion.m │ │ │ ├── GPUImageColorDodgeBlendFilter.h │ │ │ ├── GPUImageColorDodgeBlendFilter.m │ │ │ ├── GPUImageColorInvertFilter.h │ │ │ ├── GPUImageColorInvertFilter.m │ │ │ ├── GPUImageColorLocalBinaryPatternFilter.h │ │ │ ├── GPUImageColorLocalBinaryPatternFilter.m │ │ │ ├── GPUImageColorMatrixFilter.h │ │ │ ├── GPUImageColorMatrixFilter.m │ │ │ ├── GPUImageColorPackingFilter.h │ │ │ ├── GPUImageColorPackingFilter.m │ │ │ ├── GPUImageColourFASTFeatureDetector.h │ │ │ ├── GPUImageColourFASTFeatureDetector.m │ │ │ ├── GPUImageColourFASTSamplingOperation.h │ │ │ ├── GPUImageColourFASTSamplingOperation.m │ │ │ ├── GPUImageContrastFilter.h │ │ │ ├── GPUImageContrastFilter.m │ │ │ ├── GPUImageCropFilter.h │ │ │ ├── GPUImageCropFilter.m │ │ │ ├── GPUImageCrosshairGenerator.h │ │ │ ├── GPUImageCrosshairGenerator.m │ │ │ ├── GPUImageCrosshatchFilter.h │ │ │ ├── GPUImageCrosshatchFilter.m │ │ │ ├── GPUImageDarkenBlendFilter.h │ │ │ ├── GPUImageDarkenBlendFilter.m │ │ │ ├── GPUImageDifferenceBlendFilter.h │ │ │ ├── GPUImageDifferenceBlendFilter.m │ │ │ ├── GPUImageDilationFilter.h │ │ │ ├── GPUImageDilationFilter.m │ │ │ ├── GPUImageDirectionalNonMaximumSuppressionFilter.h │ │ │ ├── GPUImageDirectionalNonMaximumSuppressionFilter.m │ │ │ ├── GPUImageDirectionalSobelEdgeDetectionFilter.h │ │ │ ├── GPUImageDirectionalSobelEdgeDetectionFilter.m │ │ │ ├── GPUImageDissolveBlendFilter.h │ │ │ ├── GPUImageDissolveBlendFilter.m │ │ │ ├── 
GPUImageDivideBlendFilter.h │ │ │ ├── GPUImageDivideBlendFilter.m │ │ │ ├── GPUImageEmbossFilter.h │ │ │ ├── GPUImageEmbossFilter.m │ │ │ ├── GPUImageErosionFilter.h │ │ │ ├── GPUImageErosionFilter.m │ │ │ ├── GPUImageExclusionBlendFilter.h │ │ │ ├── GPUImageExclusionBlendFilter.m │ │ │ ├── GPUImageExposureFilter.h │ │ │ ├── GPUImageExposureFilter.m │ │ │ ├── GPUImageFASTCornerDetectionFilter.h │ │ │ ├── GPUImageFASTCornerDetectionFilter.m │ │ │ ├── GPUImageFalseColorFilter.h │ │ │ ├── GPUImageFalseColorFilter.m │ │ │ ├── GPUImageFilter.h │ │ │ ├── GPUImageFilter.m │ │ │ ├── GPUImageFilterGroup.h │ │ │ ├── GPUImageFilterGroup.m │ │ │ ├── GPUImageFilterPipeline.h │ │ │ ├── GPUImageFilterPipeline.m │ │ │ ├── GPUImageFourInputFilter.h │ │ │ ├── GPUImageFourInputFilter.m │ │ │ ├── GPUImageFramebuffer.h │ │ │ ├── GPUImageFramebuffer.m │ │ │ ├── GPUImageFramebufferCache.h │ │ │ ├── GPUImageFramebufferCache.m │ │ │ ├── GPUImageGammaFilter.h │ │ │ ├── GPUImageGammaFilter.m │ │ │ ├── GPUImageGaussianBlurFilter.h │ │ │ ├── GPUImageGaussianBlurFilter.m │ │ │ ├── GPUImageGaussianBlurPositionFilter.h │ │ │ ├── GPUImageGaussianBlurPositionFilter.m │ │ │ ├── GPUImageGaussianSelectiveBlurFilter.h │ │ │ ├── GPUImageGaussianSelectiveBlurFilter.m │ │ │ ├── GPUImageGlassSphereFilter.h │ │ │ ├── GPUImageGlassSphereFilter.m │ │ │ ├── GPUImageGrayscaleFilter.h │ │ │ ├── GPUImageGrayscaleFilter.m │ │ │ ├── GPUImageHSBFilter.h │ │ │ ├── GPUImageHSBFilter.m │ │ │ ├── GPUImageHalftoneFilter.h │ │ │ ├── GPUImageHalftoneFilter.m │ │ │ ├── GPUImageHardLightBlendFilter.h │ │ │ ├── GPUImageHardLightBlendFilter.m │ │ │ ├── GPUImageHarrisCornerDetectionFilter.h │ │ │ ├── GPUImageHarrisCornerDetectionFilter.m │ │ │ ├── GPUImageHazeFilter.h │ │ │ ├── GPUImageHazeFilter.m │ │ │ ├── GPUImageHighPassFilter.h │ │ │ ├── GPUImageHighPassFilter.m │ │ │ ├── GPUImageHighlightShadowFilter.h │ │ │ ├── GPUImageHighlightShadowFilter.m │ │ │ ├── GPUImageHighlightShadowTintFilter.h │ │ │ ├── GPUImageHighlightShadowTintFilter.m │ │ │ ├── GPUImageHistogramEqualizationFilter.h │ │ │ ├── GPUImageHistogramEqualizationFilter.m │ │ │ ├── GPUImageHistogramFilter.h │ │ │ ├── GPUImageHistogramFilter.m │ │ │ ├── GPUImageHistogramGenerator.h │ │ │ ├── GPUImageHistogramGenerator.m │ │ │ ├── GPUImageHoughTransformLineDetector.h │ │ │ ├── GPUImageHoughTransformLineDetector.m │ │ │ ├── GPUImageHueBlendFilter.h │ │ │ ├── GPUImageHueBlendFilter.m │ │ │ ├── GPUImageHueFilter.h │ │ │ ├── GPUImageHueFilter.m │ │ │ ├── GPUImageJFAVoronoiFilter.h │ │ │ ├── GPUImageJFAVoronoiFilter.m │ │ │ ├── GPUImageKuwaharaFilter.h │ │ │ ├── GPUImageKuwaharaFilter.m │ │ │ ├── GPUImageKuwaharaRadius3Filter.h │ │ │ ├── GPUImageKuwaharaRadius3Filter.m │ │ │ ├── GPUImageLanczosResamplingFilter.h │ │ │ ├── GPUImageLanczosResamplingFilter.m │ │ │ ├── GPUImageLaplacianFilter.h │ │ │ ├── GPUImageLaplacianFilter.m │ │ │ ├── GPUImageLevelsFilter.h │ │ │ ├── GPUImageLevelsFilter.m │ │ │ ├── GPUImageLightenBlendFilter.h │ │ │ ├── GPUImageLightenBlendFilter.m │ │ │ ├── GPUImageLineGenerator.h │ │ │ ├── GPUImageLineGenerator.m │ │ │ ├── GPUImageLinearBurnBlendFilter.h │ │ │ ├── GPUImageLinearBurnBlendFilter.m │ │ │ ├── GPUImageLocalBinaryPatternFilter.h │ │ │ ├── GPUImageLocalBinaryPatternFilter.m │ │ │ ├── GPUImageLookupFilter.h │ │ │ ├── GPUImageLookupFilter.m │ │ │ ├── GPUImageLowPassFilter.h │ │ │ ├── GPUImageLowPassFilter.m │ │ │ ├── GPUImageLuminanceRangeFilter.h │ │ │ ├── GPUImageLuminanceRangeFilter.m │ │ │ ├── GPUImageLuminanceThresholdFilter.h │ │ │ ├── 
GPUImageLuminanceThresholdFilter.m │ │ │ ├── GPUImageLuminosity.h │ │ │ ├── GPUImageLuminosity.m │ │ │ ├── GPUImageLuminosityBlendFilter.h │ │ │ ├── GPUImageLuminosityBlendFilter.m │ │ │ ├── GPUImageMaskFilter.h │ │ │ ├── GPUImageMaskFilter.m │ │ │ ├── GPUImageMedianFilter.h │ │ │ ├── GPUImageMedianFilter.m │ │ │ ├── GPUImageMissEtikateFilter.h │ │ │ ├── GPUImageMissEtikateFilter.m │ │ │ ├── GPUImageMonochromeFilter.h │ │ │ ├── GPUImageMonochromeFilter.m │ │ │ ├── GPUImageMosaicFilter.h │ │ │ ├── GPUImageMosaicFilter.m │ │ │ ├── GPUImageMotionBlurFilter.h │ │ │ ├── GPUImageMotionBlurFilter.m │ │ │ ├── GPUImageMotionDetector.h │ │ │ ├── GPUImageMotionDetector.m │ │ │ ├── GPUImageMovie.h │ │ │ ├── GPUImageMovie.m │ │ │ ├── GPUImageMovieComposition.h │ │ │ ├── GPUImageMovieComposition.m │ │ │ ├── GPUImageMultiplyBlendFilter.h │ │ │ ├── GPUImageMultiplyBlendFilter.m │ │ │ ├── GPUImageNobleCornerDetectionFilter.h │ │ │ ├── GPUImageNobleCornerDetectionFilter.m │ │ │ ├── GPUImageNonMaximumSuppressionFilter.h │ │ │ ├── GPUImageNonMaximumSuppressionFilter.m │ │ │ ├── GPUImageNormalBlendFilter.h │ │ │ ├── GPUImageNormalBlendFilter.m │ │ │ ├── GPUImageOpacityFilter.h │ │ │ ├── GPUImageOpacityFilter.m │ │ │ ├── GPUImageOpeningFilter.h │ │ │ ├── GPUImageOpeningFilter.m │ │ │ ├── GPUImageOutput.h │ │ │ ├── GPUImageOutput.m │ │ │ ├── GPUImageOverlayBlendFilter.h │ │ │ ├── GPUImageOverlayBlendFilter.m │ │ │ ├── GPUImageParallelCoordinateLineTransformFilter.h │ │ │ ├── GPUImageParallelCoordinateLineTransformFilter.m │ │ │ ├── GPUImagePerlinNoiseFilter.h │ │ │ ├── GPUImagePerlinNoiseFilter.m │ │ │ ├── GPUImagePinchDistortionFilter.h │ │ │ ├── GPUImagePinchDistortionFilter.m │ │ │ ├── GPUImagePixellateFilter.h │ │ │ ├── GPUImagePixellateFilter.m │ │ │ ├── GPUImagePixellatePositionFilter.h │ │ │ ├── GPUImagePixellatePositionFilter.m │ │ │ ├── GPUImagePoissonBlendFilter.h │ │ │ ├── GPUImagePoissonBlendFilter.m │ │ │ ├── GPUImagePolarPixellateFilter.h │ │ │ ├── GPUImagePolarPixellateFilter.m │ │ │ ├── GPUImagePolkaDotFilter.h │ │ │ ├── GPUImagePolkaDotFilter.m │ │ │ ├── GPUImagePosterizeFilter.h │ │ │ ├── GPUImagePosterizeFilter.m │ │ │ ├── GPUImagePrewittEdgeDetectionFilter.h │ │ │ ├── GPUImagePrewittEdgeDetectionFilter.m │ │ │ ├── GPUImageRGBClosingFilter.h │ │ │ ├── GPUImageRGBClosingFilter.m │ │ │ ├── GPUImageRGBDilationFilter.h │ │ │ ├── GPUImageRGBDilationFilter.m │ │ │ ├── GPUImageRGBErosionFilter.h │ │ │ ├── GPUImageRGBErosionFilter.m │ │ │ ├── GPUImageRGBFilter.h │ │ │ ├── GPUImageRGBFilter.m │ │ │ ├── GPUImageRGBOpeningFilter.h │ │ │ ├── GPUImageRGBOpeningFilter.m │ │ │ ├── GPUImageRawDataInput.h │ │ │ ├── GPUImageRawDataInput.m │ │ │ ├── GPUImageRawDataOutput.h │ │ │ ├── GPUImageRawDataOutput.m │ │ │ ├── GPUImageSaturationBlendFilter.h │ │ │ ├── GPUImageSaturationBlendFilter.m │ │ │ ├── GPUImageSaturationFilter.h │ │ │ ├── GPUImageSaturationFilter.m │ │ │ ├── GPUImageScreenBlendFilter.h │ │ │ ├── GPUImageScreenBlendFilter.m │ │ │ ├── GPUImageSepiaFilter.h │ │ │ ├── GPUImageSepiaFilter.m │ │ │ ├── GPUImageSharpenFilter.h │ │ │ ├── GPUImageSharpenFilter.m │ │ │ ├── GPUImageShiTomasiFeatureDetectionFilter.h │ │ │ ├── GPUImageShiTomasiFeatureDetectionFilter.m │ │ │ ├── GPUImageSingleComponentGaussianBlurFilter.h │ │ │ ├── GPUImageSingleComponentGaussianBlurFilter.m │ │ │ ├── GPUImageSketchFilter.h │ │ │ ├── GPUImageSketchFilter.m │ │ │ ├── GPUImageSkinToneFilter.h │ │ │ ├── GPUImageSkinToneFilter.m │ │ │ ├── GPUImageSmoothToonFilter.h │ │ │ ├── GPUImageSmoothToonFilter.m │ │ │ ├── 
GPUImageSobelEdgeDetectionFilter.h │ │ │ ├── GPUImageSobelEdgeDetectionFilter.m │ │ │ ├── GPUImageSoftEleganceFilter.h │ │ │ ├── GPUImageSoftEleganceFilter.m │ │ │ ├── GPUImageSoftLightBlendFilter.h │ │ │ ├── GPUImageSoftLightBlendFilter.m │ │ │ ├── GPUImageSolarizeFilter.h │ │ │ ├── GPUImageSolarizeFilter.m │ │ │ ├── GPUImageSolidColorGenerator.h │ │ │ ├── GPUImageSolidColorGenerator.m │ │ │ ├── GPUImageSourceOverBlendFilter.h │ │ │ ├── GPUImageSourceOverBlendFilter.m │ │ │ ├── GPUImageSphereRefractionFilter.h │ │ │ ├── GPUImageSphereRefractionFilter.m │ │ │ ├── GPUImageStillCamera.h │ │ │ ├── GPUImageStillCamera.m │ │ │ ├── GPUImageStretchDistortionFilter.h │ │ │ ├── GPUImageStretchDistortionFilter.m │ │ │ ├── GPUImageSubtractBlendFilter.h │ │ │ ├── GPUImageSubtractBlendFilter.m │ │ │ ├── GPUImageSwirlFilter.h │ │ │ ├── GPUImageSwirlFilter.m │ │ │ ├── GPUImageTextureInput.h │ │ │ ├── GPUImageTextureInput.m │ │ │ ├── GPUImageTextureOutput.h │ │ │ ├── GPUImageTextureOutput.m │ │ │ ├── GPUImageThreeInputFilter.h │ │ │ ├── GPUImageThreeInputFilter.m │ │ │ ├── GPUImageThresholdEdgeDetectionFilter.h │ │ │ ├── GPUImageThresholdEdgeDetectionFilter.m │ │ │ ├── GPUImageThresholdSketchFilter.h │ │ │ ├── GPUImageThresholdSketchFilter.m │ │ │ ├── GPUImageThresholdedNonMaximumSuppressionFilter.h │ │ │ ├── GPUImageThresholdedNonMaximumSuppressionFilter.m │ │ │ ├── GPUImageTiltShiftFilter.h │ │ │ ├── GPUImageTiltShiftFilter.m │ │ │ ├── GPUImageToneCurveFilter.h │ │ │ ├── GPUImageToneCurveFilter.m │ │ │ ├── GPUImageToonFilter.h │ │ │ ├── GPUImageToonFilter.m │ │ │ ├── GPUImageTransformFilter.h │ │ │ ├── GPUImageTransformFilter.m │ │ │ ├── GPUImageTwoInputCrossTextureSamplingFilter.h │ │ │ ├── GPUImageTwoInputCrossTextureSamplingFilter.m │ │ │ ├── GPUImageTwoInputFilter.h │ │ │ ├── GPUImageTwoInputFilter.m │ │ │ ├── GPUImageTwoPassFilter.h │ │ │ ├── GPUImageTwoPassFilter.m │ │ │ ├── GPUImageTwoPassTextureSamplingFilter.h │ │ │ ├── GPUImageTwoPassTextureSamplingFilter.m │ │ │ ├── GPUImageUIElement.h │ │ │ ├── GPUImageUIElement.m │ │ │ ├── GPUImageUnsharpMaskFilter.h │ │ │ ├── GPUImageUnsharpMaskFilter.m │ │ │ ├── GPUImageVideoCamera.h │ │ │ ├── GPUImageVideoCamera.m │ │ │ ├── GPUImageVignetteFilter.h │ │ │ ├── GPUImageVignetteFilter.m │ │ │ ├── GPUImageVoronoiConsumerFilter.h │ │ │ ├── GPUImageVoronoiConsumerFilter.m │ │ │ ├── GPUImageWeakPixelInclusionFilter.h │ │ │ ├── GPUImageWeakPixelInclusionFilter.m │ │ │ ├── GPUImageWhiteBalanceFilter.h │ │ │ ├── GPUImageWhiteBalanceFilter.m │ │ │ ├── GPUImageXYDerivativeFilter.h │ │ │ ├── GPUImageXYDerivativeFilter.m │ │ │ ├── GPUImageZoomBlurFilter.h │ │ │ ├── GPUImageZoomBlurFilter.m │ │ │ ├── GPUImageiOSBlurFilter.h │ │ │ ├── GPUImageiOSBlurFilter.m │ │ │ └── iOS │ │ │ │ ├── Framework │ │ │ │ └── GPUImageFramework.h │ │ │ │ ├── GPUImageContext.h │ │ │ │ ├── GPUImageContext.m │ │ │ │ ├── GPUImageMovieWriter.h │ │ │ │ ├── GPUImageMovieWriter.m │ │ │ │ ├── GPUImagePicture+TextureSubimage.h │ │ │ │ ├── GPUImagePicture+TextureSubimage.m │ │ │ │ ├── GPUImagePicture.h │ │ │ │ ├── GPUImagePicture.m │ │ │ │ ├── GPUImageView.h │ │ │ │ └── GPUImageView.m │ │ └── pili-librtmp │ │ │ ├── amf.c │ │ │ ├── amf.h │ │ │ ├── bytes.h │ │ │ ├── dh.h │ │ │ ├── dhgroups.h │ │ │ ├── error.c │ │ │ ├── error.h │ │ │ ├── handshake.h │ │ │ ├── hashswf.c │ │ │ ├── http.h │ │ │ ├── log.c │ │ │ ├── log.h │ │ │ ├── parseurl.c │ │ │ ├── rtmp.c │ │ │ ├── rtmp.h │ │ │ └── rtmp_sys.h │ ├── capture │ │ ├── LFAudioCapture.h │ │ ├── LFAudioCapture.m │ │ ├── LFVideoCapture.h │ │ └── LFVideoCapture.m │ 
├── coder │ │ ├── H264 │ │ │ ├── LFAVEncoder.h │ │ │ ├── LFAVEncoder.mm │ │ │ ├── LFMP4Atom.h │ │ │ ├── LFMP4Atom.m │ │ │ ├── LFNALUnit.cpp │ │ │ ├── LFNALUnit.h │ │ │ ├── LFVideoEncoder.h │ │ │ ├── LFVideoEncoder.m │ │ │ └── LICENSE.markdown │ │ ├── LFAudioEncoding.h │ │ ├── LFH264VideoEncoder.h │ │ ├── LFH264VideoEncoder.mm │ │ ├── LFHardwareAudioEncoder.h │ │ ├── LFHardwareAudioEncoder.m │ │ ├── LFHardwareVideoEncoder.h │ │ ├── LFHardwareVideoEncoder.m │ │ └── LFVideoEncoding.h │ ├── configuration │ │ ├── LFLiveAudioConfiguration.h │ │ ├── LFLiveAudioConfiguration.m │ │ ├── LFLiveVideoConfiguration.h │ │ └── LFLiveVideoConfiguration.m │ ├── filter │ │ ├── LFGPUImageBeautyFilter.h │ │ ├── LFGPUImageBeautyFilter.m │ │ ├── LFGPUImageEmptyFilter.h │ │ └── LFGPUImageEmptyFilter.m │ ├── objects │ │ ├── LFAudioFrame.h │ │ ├── LFAudioFrame.m │ │ ├── LFFrame.h │ │ ├── LFFrame.m │ │ ├── LFLiveDebug.h │ │ ├── LFLiveDebug.m │ │ ├── LFLiveStreamInfo.h │ │ ├── LFLiveStreamInfo.m │ │ ├── LFVideoFrame.h │ │ └── LFVideoFrame.m │ └── publish │ │ ├── LFStreamRTMPSocket.h │ │ ├── LFStreamRTMPSocket.m │ │ ├── LFStreamSocket.h │ │ ├── LFStreamingBuffer.h │ │ ├── LFStreamingBuffer.m │ │ ├── NSMutableArray+LFAdd.h │ │ └── NSMutableArray+LFAdd.m │ ├── RCTLFLiveKit.xcodeproj │ ├── project.pbxproj │ ├── project.xcworkspace │ │ └── contents.xcworkspacedata │ └── xcuserdata │ │ └── onbing.xcuserdatad │ │ └── xcschemes │ │ ├── RCTLFLiveKit.xcscheme │ │ └── xcschememanagement.plist │ └── RCTLFLiveKit │ ├── RCTStream.h │ ├── RCTStream.m │ ├── RCTStreamManager.h │ └── RCTStreamManager.m └── package.json /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | ## IDE 4 | .idea 5 | .vscode 6 | 7 | ## Build generated 8 | build/ 9 | 10 | ## Other 11 | *.moved-aside 12 | *.xcuserstate 13 | 14 | ## Obj-C/Swift specific 15 | *.hmap 16 | *.ipa 17 | *.dSYM.zip 18 | *.dSYM 19 | 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 晏兵兵 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
-------------------------------------------------------------------------------- 
/README.md: 
-------------------------------------------------------------------------------- 
1 | # rn-live-stream
2 | 
3 | [![NPM Version][npm-image]][npm-url]
4 | 
5 | # Info
6 | This library is based on [LFLiveKit](https://github.com/LaiFengiOS/LFLiveKit/issues).
7 | 
8 | Only iOS is supported.
9 | 
10 | ## Add it to your project
11 | 
12 | Run `npm install --save rn-live-stream`
13 | 
14 | ### iOS
15 | 
16 | 1. Add RCTLFLiveKit.xcodeproj to Libraries
17 | 
18 | 2. Click on your project in Xcode
19 | 
20 | 3. Go to General -> Embedded Binaries and add LFLiveKit.framework
21 | 
22 | 4. Go to Linked Frameworks and Libraries and add LFLiveKit.framework
23 | 
24 | ## Usage
25 | 
26 | ```javascript
27 | import LiveStream from 'rn-live-stream';
28 | 
29 | <LiveStream
34 |   onReady={() => {}} // streaming ready
35 |   onPending={() => {}} // streaming ready to start
36 |   onStart={() => {}} // streaming start
37 |   onError={() => {}} // streaming error
38 |   onStop={() => {}} // streaming stop
39 | />
40 | ```
41 | 
42 | # License
43 | MIT
44 | 
45 | [npm-image]: https://img.shields.io/npm/v/rn-live-stream.svg
46 | [npm-url]: https://www.npmjs.com/package/rn-live-stream
47 | 
48 | 
-------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit.xcodeproj/xcuserdata/onbing.xcuserdatad/xcschemes/xcschememanagement.plist: 
-------------------------------------------------------------------------------- 
1 | 
2 | 
3 | 
4 | 
5 | SchemeUserState
6 | 
7 | LFLiveKit.xcscheme_^#shared#^_
8 | 
9 | orderHint
10 | 0
11 | 
12 | 
13 | SuppressBuildableAutocreation
14 | 
15 | 84001F891D0015D10026C63F
16 | 
17 | primary
18 | 
19 | 
20 | 
21 | 
22 | 
23 | 
-------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit/Info.plist: 
-------------------------------------------------------------------------------- 
1 | 
2 | 
3 | 
4 | 
5 | CFBundleDevelopmentRegion
6 | en
7 | CFBundleExecutable
8 | $(EXECUTABLE_NAME)
9 | CFBundleIdentifier
10 | $(PRODUCT_BUNDLE_IDENTIFIER)
11 | CFBundleInfoDictionaryVersion
12 | 6.0
13 | CFBundleName
14 | $(PRODUCT_NAME)
15 | CFBundlePackageType
16 | FMWK
17 | CFBundleShortVersionString
18 | 2.0
19 | CFBundleSignature
20 | ????
21 | CFBundleVersion
22 | $(CURRENT_PROJECT_VERSION)
23 | NSPrincipalClass
24 | 
25 | 
26 | 
27 | 
-------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit/LFLiveKit.h: 
-------------------------------------------------------------------------------- 
1 | //
2 | //  LFLiveKit.h
3 | //  LFLiveKit
4 | //
5 | //  Created by admin on 16/5/24.
6 | //  Copyright © 2016年 admin. All rights reserved.
7 | //
8 | 
9 | #import "LFLiveSession.h"
10 | #import "LFLiveAudioConfiguration.h"
11 | #import "LFLiveVideoConfiguration.h"
12 | #import "LFAudioFrame.h"
13 | #import "LFFrame.h"
14 | #import "LFLiveStreamInfo.h"
15 | #import "LFVideoFrame.h"
16 | 
-------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GLProgram.h: 
-------------------------------------------------------------------------------- 
1 | // This is Jeff LaMarche's GLProgram OpenGL shader wrapper class from his OpenGL ES 2.0 book.
2 | // A description of this can be found at his page on the topic: 3 | // http://iphonedevelopment.blogspot.com/2010/11/opengl-es-20-for-ios-chapter-4.html 4 | // I've extended this to be able to take programs as NSStrings in addition to files, for baked-in shaders 5 | 6 | #import 7 | 8 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 9 | #import 10 | #import 11 | #else 12 | #import 13 | #import 14 | #endif 15 | 16 | @interface GLProgram : NSObject 17 | { 18 | NSMutableArray *attributes; 19 | NSMutableArray *uniforms; 20 | GLuint program, 21 | vertShader, 22 | fragShader; 23 | } 24 | 25 | @property(readwrite, nonatomic) BOOL initialized; 26 | @property(readwrite, copy, nonatomic) NSString *vertexShaderLog; 27 | @property(readwrite, copy, nonatomic) NSString *fragmentShaderLog; 28 | @property(readwrite, copy, nonatomic) NSString *programLog; 29 | 30 | - (id)initWithVertexShaderString:(NSString *)vShaderString 31 | fragmentShaderString:(NSString *)fShaderString; 32 | - (id)initWithVertexShaderString:(NSString *)vShaderString 33 | fragmentShaderFilename:(NSString *)fShaderFilename; 34 | - (id)initWithVertexShaderFilename:(NSString *)vShaderFilename 35 | fragmentShaderFilename:(NSString *)fShaderFilename; 36 | - (void)addAttribute:(NSString *)attributeName; 37 | - (GLuint)attributeIndex:(NSString *)attributeName; 38 | - (GLuint)uniformIndex:(NSString *)uniformName; 39 | - (BOOL)link; 40 | - (void)use; 41 | - (void)validate; 42 | @end 43 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImage3x3ConvolutionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | /** Runs a 3x3 convolution kernel against the image 4 | */ 5 | @interface GPUImage3x3ConvolutionFilter : GPUImage3x3TextureSamplingFilter 6 | { 7 | GLint convolutionMatrixUniform; 8 | } 9 | 10 | /** Convolution kernel to run against the image 11 | 12 | The convolution kernel is a 3x3 matrix of values to apply to the pixel and its 8 surrounding pixels. 13 | The matrix is specified in row-major order, with the top left pixel being one.one and the bottom right three.three 14 | If the values in the matrix don't add up to 1.0, the image could be brightened or darkened. 15 | */ 16 | @property(readwrite, nonatomic) GPUMatrix3x3 convolutionKernel; 17 | 18 | @end 19 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImage3x3TextureSamplingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | extern NSString *const kGPUImageNearbyTexelSamplingVertexShaderString; 4 | 5 | @interface GPUImage3x3TextureSamplingFilter : GPUImageFilter 6 | { 7 | GLint texelWidthUniform, texelHeightUniform; 8 | 9 | CGFloat texelWidth, texelHeight; 10 | BOOL hasOverriddenImageSizeFactor; 11 | } 12 | 13 | // The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 
14 | @property(readwrite, nonatomic) CGFloat texelWidth; 15 | @property(readwrite, nonatomic) CGFloat texelHeight; 16 | 17 | 18 | @end 19 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAdaptiveThresholdFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @interface GPUImageAdaptiveThresholdFilter : GPUImageFilterGroup 4 | 5 | /** A multiplier for the background averaging blur radius in pixels, with a default of 4 6 | */ 7 | @property(readwrite, nonatomic) CGFloat blurRadiusInPixels; 8 | 9 | @end 10 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAddBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageAddBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageAlphaBlendFilter : GPUImageTwoInputFilter 4 | { 5 | GLint mixUniform; 6 | } 7 | 8 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 1.0 as the normal level 9 | @property(readwrite, nonatomic) CGFloat mix; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAlphaBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageAlphaBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageAlphaBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | uniform lowp float mixturePercent; 13 | 14 | void main() 15 | { 16 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 17 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 18 | 19 | gl_FragColor = vec4(mix(textureColor.rgb, textureColor2.rgb, textureColor2.a * mixturePercent), textureColor.a); 20 | } 21 | ); 22 | #else 23 | NSString *const kGPUImageAlphaBlendFragmentShaderString = SHADER_STRING 24 | ( 25 | varying vec2 textureCoordinate; 26 | varying vec2 textureCoordinate2; 27 | 28 | uniform sampler2D inputImageTexture; 29 | uniform sampler2D inputImageTexture2; 30 | 31 | uniform float mixturePercent; 32 | 33 | void main() 34 | { 35 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 36 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 37 | 38 | gl_FragColor = vec4(mix(textureColor.rgb, textureColor2.rgb, textureColor2.a * mixturePercent), textureColor.a); 39 | } 40 | ); 41 | #endif 42 | 43 | @implementation GPUImageAlphaBlendFilter 44 | 45 | @synthesize mix = _mix; 46 | 47 | - (id)init; 48 | { 49 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageAlphaBlendFragmentShaderString])) 50 | { 51 | return nil; 52 | } 53 | 54 | mixUniform = [filterProgram uniformIndex:@"mixturePercent"]; 55 | self.mix = 0.5; 56 | 57 | return self; 58 | } 59 | 60 | 61 | #pragma 
mark - 62 | #pragma mark Accessors 63 | 64 | - (void)setMix:(CGFloat)newValue; 65 | { 66 | _mix = newValue; 67 | 68 | [self setFloat:_mix forUniform:mixUniform program:filterProgram]; 69 | } 70 | 71 | 72 | @end 73 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImagePicture; 4 | 5 | /** A photo filter based on Photoshop action by Amatorka 6 | http://amatorka.deviantart.com/art/Amatorka-Action-2-121069631 7 | */ 8 | 9 | // Note: If you want to use this effect you have to add lookup_amatorka.png 10 | // from Resources folder to your application bundle. 11 | 12 | @interface GPUImageAmatorkaFilter : GPUImageFilterGroup 13 | { 14 | GPUImagePicture *lookupImageSource; 15 | } 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAmatorkaFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageAmatorkaFilter.h" 2 | #import "GPUImagePicture.h" 3 | #import "GPUImageLookupFilter.h" 4 | 5 | @implementation GPUImageAmatorkaFilter 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [super init])) 10 | { 11 | return nil; 12 | } 13 | 14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 15 | UIImage *image = [UIImage imageNamed:@"lookup_amatorka.png"]; 16 | #else 17 | NSImage *image = [NSImage imageNamed:@"lookup_amatorka.png"]; 18 | #endif 19 | 20 | NSAssert(image, @"To use GPUImageAmatorkaFilter you need to add lookup_amatorka.png from GPUImage/framework/Resources to your application bundle."); 21 | 22 | lookupImageSource = [[GPUImagePicture alloc] initWithImage:image]; 23 | GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init]; 24 | [self addFilter:lookupFilter]; 25 | 26 | [lookupImageSource addTarget:lookupFilter atTextureLocation:1]; 27 | [lookupImageSource processImage]; 28 | 29 | self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil]; 30 | self.terminalFilter = lookupFilter; 31 | 32 | return self; 33 | } 34 | 35 | #pragma mark - 36 | #pragma mark Accessors 37 | 38 | @end 39 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAverageColor.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | extern NSString *const kGPUImageColorAveragingVertexShaderString; 4 | 5 | @interface GPUImageAverageColor : GPUImageFilter 6 | { 7 | GLint texelWidthUniform, texelHeightUniform; 8 | 9 | NSUInteger numberOfStages; 10 | 11 | GLubyte *rawImagePixels; 12 | CGSize finalStageSize; 13 | } 14 | 15 | // This block is called on the completion of color averaging for a frame 16 | @property(nonatomic, copy) void(^colorAverageProcessingFinishedBlock)(CGFloat redComponent, CGFloat greenComponent, CGFloat blueComponent, CGFloat alphaComponent, CMTime frameTime); 17 | 18 | - (void)extractAverageColorAtFrameTime:(CMTime)frameTime; 19 | 20 | @end 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @interface 
GPUImageAverageLuminanceThresholdFilter : GPUImageFilterGroup 4 | 5 | // This is multiplied by the continually calculated average image luminosity to arrive at the final threshold. Default is 1.0. 6 | @property(readwrite, nonatomic) CGFloat thresholdMultiplier; 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageAverageLuminanceThresholdFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageAverageLuminanceThresholdFilter.h" 2 | #import "GPUImageLuminosity.h" 3 | #import "GPUImageLuminanceThresholdFilter.h" 4 | 5 | @interface GPUImageAverageLuminanceThresholdFilter() 6 | { 7 | GPUImageLuminosity *luminosityFilter; 8 | GPUImageLuminanceThresholdFilter *luminanceThresholdFilter; 9 | } 10 | @end 11 | 12 | @implementation GPUImageAverageLuminanceThresholdFilter 13 | 14 | @synthesize thresholdMultiplier = _thresholdMultiplier; 15 | 16 | #pragma mark - 17 | #pragma mark Initialization and teardown 18 | 19 | - (id)init; 20 | { 21 | if (!(self = [super init])) 22 | { 23 | return nil; 24 | } 25 | 26 | self.thresholdMultiplier = 1.0; 27 | 28 | luminosityFilter = [[GPUImageLuminosity alloc] init]; 29 | [self addFilter:luminosityFilter]; 30 | 31 | luminanceThresholdFilter = [[GPUImageLuminanceThresholdFilter alloc] init]; 32 | [self addFilter:luminanceThresholdFilter]; 33 | 34 | __unsafe_unretained GPUImageAverageLuminanceThresholdFilter *weakSelf = self; 35 | __unsafe_unretained GPUImageLuminanceThresholdFilter *weakThreshold = luminanceThresholdFilter; 36 | 37 | [luminosityFilter setLuminosityProcessingFinishedBlock:^(CGFloat luminosity, CMTime frameTime) { 38 | weakThreshold.threshold = luminosity * weakSelf.thresholdMultiplier; 39 | }]; 40 | 41 | self.initialFilters = [NSArray arrayWithObjects:luminosityFilter, luminanceThresholdFilter, nil]; 42 | self.terminalFilter = luminanceThresholdFilter; 43 | 44 | return self; 45 | } 46 | 47 | @end 48 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBilateralFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageGaussianBlurFilter.h" 2 | 3 | @interface GPUImageBilateralFilter : GPUImageGaussianBlurFilter 4 | { 5 | CGFloat firstDistanceNormalizationFactorUniform; 6 | CGFloat secondDistanceNormalizationFactorUniform; 7 | } 8 | // A normalization factor for the distance between central color and sample color. 
9 | @property(nonatomic, readwrite) CGFloat distanceNormalizationFactor; 10 | @end 11 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBoxBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageGaussianBlurFilter.h" 2 | 3 | /** A hardware-accelerated box blur of an image 4 | */ 5 | @interface GPUImageBoxBlurFilter : GPUImageGaussianBlurFilter 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageBrightnessFilter : GPUImageFilter 4 | { 5 | GLint brightnessUniform; 6 | } 7 | 8 | // Brightness ranges from -1.0 to 1.0, with 0.0 as the normal level 9 | @property(readwrite, nonatomic) CGFloat brightness; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBrightnessFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageBrightnessFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform lowp float brightness; 10 | 11 | void main() 12 | { 13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 14 | 15 | gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w); 16 | } 17 | ); 18 | #else 19 | NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING 20 | ( 21 | varying vec2 textureCoordinate; 22 | 23 | uniform sampler2D inputImageTexture; 24 | uniform float brightness; 25 | 26 | void main() 27 | { 28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 29 | 30 | gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w); 31 | } 32 | ); 33 | #endif 34 | 35 | @implementation GPUImageBrightnessFilter 36 | 37 | @synthesize brightness = _brightness; 38 | 39 | #pragma mark - 40 | #pragma mark Initialization and teardown 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageBrightnessFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | brightnessUniform = [filterProgram uniformIndex:@"brightness"]; 50 | self.brightness = 0.0; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setBrightness:(CGFloat)newValue; 59 | { 60 | _brightness = newValue; 61 | 62 | [self setFloat:_brightness forUniform:brightnessUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | 67 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBuffer.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageBuffer : GPUImageFilter 4 | { 5 | NSMutableArray *bufferedFramebuffers; 6 | } 7 | 8 | @property(readwrite, nonatomic) NSUInteger bufferSize; 9 | 10 | @end 11 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageBulgeDistortionFilter.h: 
-------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /// Creates a bulge distortion on the image 4 | @interface GPUImageBulgeDistortionFilter : GPUImageFilter 5 | { 6 | GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform; 7 | } 8 | 9 | /// The center about which to apply the distortion, with a default of (0.5, 0.5) 10 | @property(readwrite, nonatomic) CGPoint center; 11 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25 12 | @property(readwrite, nonatomic) CGFloat radius; 13 | /// The amount of distortion to apply, from -1.0 to 1.0, with a default of 0.5 14 | @property(readwrite, nonatomic) CGFloat scale; 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageCGAColorspaceFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageCGAColorspaceFilter : GPUImageFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | /** Selectively replaces a color in the first image with the second image 4 | */ 5 | @interface GPUImageChromaKeyBlendFilter : GPUImageTwoInputFilter 6 | { 7 | GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform; 8 | } 9 | 10 | /** The threshold sensitivity controls how similar pixels need to be colored to be replaced 11 | 12 | The default value is 0.3 13 | */ 14 | @property(readwrite, nonatomic) CGFloat thresholdSensitivity; 15 | 16 | /** The degree of smoothing controls how gradually similar colors are replaced in the image 17 | 18 | The default value is 0.1 19 | */ 20 | @property(readwrite, nonatomic) CGFloat smoothing; 21 | 22 | /** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0). 23 | 24 | The default is green: (0.0, 1.0, 0.0). 25 | 26 | @param redComponent Red component of color to be replaced 27 | @param greenComponent Green component of color to be replaced 28 | @param blueComponent Blue component of color to be replaced 29 | */ 30 | - (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 31 | 32 | @end 33 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageChromaKeyFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageChromaKeyFilter : GPUImageFilter 4 | { 5 | GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform; 6 | } 7 | 8 | /** The threshold sensitivity controls how similar pixels need to be colored to be replaced 9 | 10 | The default value is 0.3 11 | */ 12 | @property(readwrite, nonatomic) CGFloat thresholdSensitivity; 13 | 14 | /** The degree of smoothing controls how gradually similar colors are replaced in the image 15 | 16 | The default value is 0.1 17 | */ 18 | @property(readwrite, nonatomic) CGFloat smoothing; 19 | 20 | /** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0). 21 | 22 | The default is green: (0.0, 1.0, 0.0). 
23 | 24 | @param redComponent Red component of color to be replaced 25 | @param greenComponent Green component of color to be replaced 26 | @param blueComponent Blue component of color to be replaced 27 | */ 28 | - (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 29 | 30 | @end 31 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageErosionFilter; 4 | @class GPUImageDilationFilter; 5 | 6 | // A filter that first performs a dilation on the red channel of an image, followed by an erosion of the same radius. 7 | // This helps to filter out smaller dark elements. 8 | 9 | @interface GPUImageClosingFilter : GPUImageFilterGroup 10 | { 11 | GPUImageErosionFilter *erosionFilter; 12 | GPUImageDilationFilter *dilationFilter; 13 | } 14 | 15 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; 16 | 17 | - (id)initWithRadius:(NSUInteger)radius; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageClosingFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageClosingFilter.h" 2 | #import "GPUImageErosionFilter.h" 3 | #import "GPUImageDilationFilter.h" 4 | 5 | @implementation GPUImageClosingFilter 6 | 7 | @synthesize verticalTexelSpacing = _verticalTexelSpacing; 8 | @synthesize horizontalTexelSpacing = _horizontalTexelSpacing; 9 | 10 | - (id)init; 11 | { 12 | if (!(self = [self initWithRadius:1])) 13 | { 14 | return nil; 15 | } 16 | 17 | return self; 18 | } 19 | 20 | - (id)initWithRadius:(NSUInteger)radius; 21 | { 22 | if (!(self = [super init])) 23 | { 24 | return nil; 25 | } 26 | 27 | // First pass: dilation 28 | dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius]; 29 | [self addFilter:dilationFilter]; 30 | 31 | // Second pass: erosion 32 | erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius]; 33 | [self addFilter:erosionFilter]; 34 | 35 | [dilationFilter addTarget:erosionFilter]; 36 | 37 | self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil]; 38 | self.terminalFilter = erosionFilter; 39 | 40 | return self; 41 | } 42 | 43 | - (void)setVerticalTexelSpacing:(CGFloat)newValue; 44 | { 45 | _verticalTexelSpacing = newValue; 46 | erosionFilter.verticalTexelSpacing = newValue; 47 | dilationFilter.verticalTexelSpacing = newValue; 48 | } 49 | 50 | - (void)setHorizontalTexelSpacing:(CGFloat)newValue; 51 | { 52 | _horizontalTexelSpacing = newValue; 53 | erosionFilter.horizontalTexelSpacing = newValue; 54 | dilationFilter.horizontalTexelSpacing = newValue; 55 | } 56 | 57 | @end 58 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageColorBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import 
"GPUImageTwoInputFilter.h" 2 | 3 | /** Applies a color burn blend of two images 4 | */ 5 | @interface GPUImageColorBurnBlendFilter : GPUImageTwoInputFilter 6 | { 7 | } 8 | 9 | @end 10 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorBurnBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageColorBurnBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | mediump vec4 whiteColor = vec4(1.0); 17 | gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2; 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 33 | vec4 whiteColor = vec4(1.0); 34 | gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2; 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageColorBurnBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorBurnBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorConversion.h: -------------------------------------------------------------------------------- 1 | #ifndef GPUImageColorConversion_h 2 | #define GPUImageColorConversion_h 3 | 4 | extern GLfloat *kColorConversion601; 5 | extern GLfloat *kColorConversion601FullRange; 6 | extern GLfloat *kColorConversion709; 7 | extern NSString *const kGPUImageYUVVideoRangeConversionForRGFragmentShaderString; 8 | extern NSString *const kGPUImageYUVFullRangeConversionForLAFragmentShaderString; 9 | extern NSString *const kGPUImageYUVVideoRangeConversionForLAFragmentShaderString; 10 | 11 | 12 | #endif /* GPUImageColorConversion_h */ 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorDodgeBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | /** Applies a color dodge blend of two images 4 | */ 5 | @interface GPUImageColorDodgeBlendFilter : GPUImageTwoInputFilter 6 | { 7 | } 8 | 9 | @end 10 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageColorInvertFilter : GPUImageFilter 4 | { 5 | } 6 | 7 | @end 8 | 
-------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorInvertFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageColorInvertFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | 10 | void main() 11 | { 12 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 13 | 14 | gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); 15 | } 16 | ); 17 | #else 18 | NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING 19 | ( 20 | varying vec2 textureCoordinate; 21 | 22 | uniform sampler2D inputImageTexture; 23 | 24 | void main() 25 | { 26 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 27 | 28 | gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); 29 | } 30 | ); 31 | #endif 32 | 33 | @implementation GPUImageColorInvertFilter 34 | 35 | - (id)init; 36 | { 37 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageInvertFragmentShaderString])) 38 | { 39 | return nil; 40 | } 41 | 42 | return self; 43 | } 44 | 45 | @end 46 | 47 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorLocalBinaryPatternFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageColorLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorMatrixFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Transforms the colors of an image by applying a matrix to them 4 | */ 5 | @interface GPUImageColorMatrixFilter : GPUImageFilter 6 | { 7 | GLint colorMatrixUniform; 8 | GLint intensityUniform; 9 | } 10 | 11 | /** A 4x4 matrix used to transform each color in an image 12 | */ 13 | @property(readwrite, nonatomic) GPUMatrix4x4 colorMatrix; 14 | 15 | /** The degree to which the new transformed color replaces the original color for each pixel 16 | */ 17 | @property(readwrite, nonatomic) CGFloat intensity; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColorPackingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageColorPackingFilter : GPUImageFilter 4 | { 5 | GLint texelWidthUniform, texelHeightUniform; 6 | 7 | CGFloat texelWidth, texelHeight; 8 | } 9 | 10 | @end 11 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | // This generates image-wide feature descriptors using the ColourFAST process, as developed and described in 4 | // 5 | // A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 
28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129. 6 | // 7 | // Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014. 8 | // http://aut.researchgateway.ac.nz/handle/10292/7991 9 | 10 | @class GPUImageColourFASTSamplingOperation; 11 | @class GPUImageBoxBlurFilter; 12 | 13 | @interface GPUImageColourFASTFeatureDetector : GPUImageFilterGroup 14 | { 15 | GPUImageBoxBlurFilter *blurFilter; 16 | GPUImageColourFASTSamplingOperation *colourFASTSamplingOperation; 17 | } 18 | // The blur radius of the underlying box blur. The default is 3.0. 19 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 20 | 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTFeatureDetector.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageColourFASTFeatureDetector.h" 2 | #import "GPUImageColourFASTSamplingOperation.h" 3 | #import "GPUImageBoxBlurFilter.h" 4 | 5 | @implementation GPUImageColourFASTFeatureDetector 6 | 7 | @synthesize blurRadiusInPixels; 8 | 9 | - (id)init; 10 | { 11 | if (!(self = [super init])) 12 | { 13 | return nil; 14 | } 15 | 16 | // First pass: apply a variable Gaussian blur 17 | blurFilter = [[GPUImageBoxBlurFilter alloc] init]; 18 | [self addFilter:blurFilter]; 19 | 20 | // Second pass: combine the blurred image with the original sharp one 21 | colourFASTSamplingOperation = [[GPUImageColourFASTSamplingOperation alloc] init]; 22 | [self addFilter:colourFASTSamplingOperation]; 23 | 24 | // Texture location 0 needs to be the sharp image for both the blur and the second stage processing 25 | [blurFilter addTarget:colourFASTSamplingOperation atTextureLocation:1]; 26 | 27 | self.initialFilters = [NSArray arrayWithObjects:blurFilter, colourFASTSamplingOperation, nil]; 28 | self.terminalFilter = colourFASTSamplingOperation; 29 | 30 | self.blurRadiusInPixels = 3.0; 31 | 32 | return self; 33 | } 34 | 35 | #pragma mark - 36 | #pragma mark Accessors 37 | 38 | - (void)setBlurRadiusInPixels:(CGFloat)newValue; 39 | { 40 | blurFilter.blurRadiusInPixels = newValue; 41 | } 42 | 43 | - (CGFloat)blurRadiusInPixels; 44 | { 45 | return blurFilter.blurRadiusInPixels; 46 | } 47 | 48 | @end -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageColourFASTSamplingOperation.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | // This is the feature extraction phase of the ColourFAST feature detector, as described in: 4 | // 5 | // A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129. 6 | // 7 | // Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014. 
8 | // http://aut.researchgateway.ac.nz/handle/10292/7991 9 | 10 | @interface GPUImageColourFASTSamplingOperation : GPUImageTwoInputFilter 11 | { 12 | GLint texelWidthUniform, texelHeightUniform; 13 | 14 | CGFloat texelWidth, texelHeight; 15 | BOOL hasOverriddenImageSizeFactor; 16 | } 17 | 18 | // The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 19 | @property(readwrite, nonatomic) CGFloat texelWidth; 20 | @property(readwrite, nonatomic) CGFloat texelHeight; 21 | 22 | @end 23 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Adjusts the contrast of the image 4 | */ 5 | @interface GPUImageContrastFilter : GPUImageFilter 6 | { 7 | GLint contrastUniform; 8 | } 9 | 10 | /** Contrast ranges from 0.0 to 4.0 (max contrast), with 1.0 as the normal level 11 | */ 12 | @property(readwrite, nonatomic) CGFloat contrast; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageContrastFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageContrastFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform lowp float contrast; 10 | 11 | void main() 12 | { 13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 14 | 15 | gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w); 16 | } 17 | ); 18 | #else 19 | NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING 20 | ( 21 | varying vec2 textureCoordinate; 22 | 23 | uniform sampler2D inputImageTexture; 24 | uniform float contrast; 25 | 26 | void main() 27 | { 28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 29 | 30 | gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w); 31 | } 32 | ); 33 | #endif 34 | 35 | @implementation GPUImageContrastFilter 36 | 37 | @synthesize contrast = _contrast; 38 | 39 | #pragma mark - 40 | #pragma mark Initialization 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageContrastFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | contrastUniform = [filterProgram uniformIndex:@"contrast"]; 50 | self.contrast = 1.0; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setContrast:(CGFloat)newValue; 59 | { 60 | _contrast = newValue; 61 | 62 | [self setFloat:_contrast forUniform:contrastUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | 67 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageCropFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageCropFilter : GPUImageFilter 4 | { 5 | GLfloat cropTextureCoordinates[8]; 6 | } 7 | 8 | // The crop region is the rectangle within the image to crop. 
It is normalized to a coordinate space from 0.0 to 1.0, with 0.0, 0.0 being the upper left corner of the image 9 | @property(readwrite, nonatomic) CGRect cropRegion; 10 | 11 | // Initialization and teardown 12 | - (id)initWithCropRegion:(CGRect)newCropRegion; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageCrosshairGenerator.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageCrosshairGenerator : GPUImageFilter 4 | { 5 | GLint crosshairWidthUniform, crosshairColorUniform; 6 | } 7 | 8 | // The width of the displayed crosshairs, in pixels. Currently this only works well for odd widths. The default is 5. 9 | @property(readwrite, nonatomic) CGFloat crosshairWidth; 10 | 11 | // The color of the crosshairs is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0). 12 | - (void)setCrosshairColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 13 | 14 | // Rendering 15 | - (void)renderCrosshairsFromArray:(GLfloat *)crosshairCoordinates count:(NSUInteger)numberOfCrosshairs frameTime:(CMTime)frameTime; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageCrosshatchFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageCrosshatchFilter : GPUImageFilter 4 | { 5 | GLint crossHatchSpacingUniform, lineWidthUniform; 6 | } 7 | // The fractional width of the image to use as the spacing for the crosshatch. The default is 0.03. 8 | @property(readwrite, nonatomic) CGFloat crossHatchSpacing; 9 | 10 | // A relative width for the crosshatch lines. The default is 0.003. 
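// Usage sketch (illustrative only; `stillImageSource` is an assumed, already-configured GPUImagePicture):
//
//   GPUImageCrosshatchFilter *crosshatchFilter = [[GPUImageCrosshatchFilter alloc] init];
//   crosshatchFilter.crossHatchSpacing = 0.05;   // wider spacing than the 0.03 default
//   crosshatchFilter.lineWidth = 0.003;          // keep the default relative line width
//   [stillImageSource addTarget:crosshatchFilter];
//   [crosshatchFilter useNextFrameForImageCapture];
//   [stillImageSource processImage];
//   UIImage *crosshatchedImage = [crosshatchFilter imageFromCurrentFramebuffer];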
11 | @property(readwrite, nonatomic) CGFloat lineWidth; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageDarkenBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDarkenBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageDarkenBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | lowp vec4 base = texture2D(inputImageTexture, textureCoordinate); 15 | lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 base = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); 33 | 34 | gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageDarkenBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageDarkenBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageDifferenceBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDifferenceBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageDifferenceBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a); 17 | } 18 | ); 19 | #else 
20 | NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING 21 | ( 22 | varying vec2 textureCoordinate; 23 | varying vec2 textureCoordinate2; 24 | 25 | uniform sampler2D inputImageTexture; 26 | uniform sampler2D inputImageTexture2; 27 | 28 | void main() 29 | { 30 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 31 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 32 | gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a); 33 | } 34 | ); 35 | #endif 36 | 37 | @implementation GPUImageDifferenceBlendFilter 38 | 39 | - (id)init; 40 | { 41 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageDifferenceBlendFragmentShaderString])) 42 | { 43 | return nil; 44 | } 45 | 46 | return self; 47 | } 48 | 49 | @end 50 | 51 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDilationFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | // For each pixel, this sets it to the maximum value of the red channel in a rectangular neighborhood extending out dilationRadius pixels from the center. 4 | // This extends out bright features, and is most commonly used with black-and-white thresholded images. 5 | 6 | extern NSString *const kGPUImageDilationRadiusOneVertexShaderString; 7 | extern NSString *const kGPUImageDilationRadiusTwoVertexShaderString; 8 | extern NSString *const kGPUImageDilationRadiusThreeVertexShaderString; 9 | extern NSString *const kGPUImageDilationRadiusFourVertexShaderString; 10 | 11 | @interface GPUImageDilationFilter : GPUImageTwoPassTextureSamplingFilter 12 | 13 | // Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. 14 | - (id)initWithRadius:(NSUInteger)dilationRadius; 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalNonMaximumSuppressionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageDirectionalNonMaximumSuppressionFilter : GPUImageFilter 4 | { 5 | GLint texelWidthUniform, texelHeightUniform; 6 | GLint upperThresholdUniform, lowerThresholdUniform; 7 | 8 | BOOL hasOverriddenImageSizeFactor; 9 | } 10 | 11 | // The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 
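// Configuration sketch (illustrative; in practice this stage is usually driven internally by
// GPUImageCannyEdgeDetectionFilter, and `gradientSource` below is an assumed upstream filter):
//
//   GPUImageDirectionalNonMaximumSuppressionFilter *suppressionFilter =
//       [[GPUImageDirectionalNonMaximumSuppressionFilter alloc] init];
//   suppressionFilter.upperThreshold = 0.4;   // edges above this always register
//   suppressionFilter.lowerThreshold = 0.1;   // edges below this are always discarded
//   [gradientSource addTarget:suppressionFilter];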
12 | @property(readwrite, nonatomic) CGFloat texelWidth; 13 | @property(readwrite, nonatomic) CGFloat texelHeight; 14 | 15 | // These thresholds set cutoffs for the intensities that definitely get registered (upper threshold) and those that definitely don't (lower threshold) 16 | @property(readwrite, nonatomic) CGFloat upperThreshold; 17 | @property(readwrite, nonatomic) CGFloat lowerThreshold; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDirectionalSobelEdgeDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageDirectionalSobelEdgeDetectionFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageDissolveBlendFilter : GPUImageTwoInputFilter 4 | { 5 | GLint mixUniform; 6 | } 7 | 8 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 0.5 (half of either) as the normal level 9 | @property(readwrite, nonatomic) CGFloat mix; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDissolveBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageDissolveBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageDissolveBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | uniform lowp float mixturePercent; 12 | 13 | void main() 14 | { 15 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 16 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 17 | 18 | gl_FragColor = mix(textureColor, textureColor2, mixturePercent); 19 | } 20 | ); 21 | #else 22 | NSString *const kGPUImageDissolveBlendFragmentShaderString = SHADER_STRING 23 | ( 24 | varying vec2 textureCoordinate; 25 | varying vec2 textureCoordinate2; 26 | 27 | uniform sampler2D inputImageTexture; 28 | uniform sampler2D inputImageTexture2; 29 | uniform float mixturePercent; 30 | 31 | void main() 32 | { 33 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 34 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 35 | 36 | gl_FragColor = mix(textureColor, textureColor2, mixturePercent); 37 | } 38 | ); 39 | #endif 40 | 41 | @implementation GPUImageDissolveBlendFilter 42 | 43 | @synthesize mix = _mix; 44 | 45 | #pragma mark - 46 | #pragma mark Initialization and teardown 47 | 48 | - (id)init; 49 | { 50 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageDissolveBlendFragmentShaderString])) 51 | { 52 | return nil; 53 | } 54 | 55 | mixUniform = [filterProgram uniformIndex:@"mixturePercent"]; 56 | self.mix = 0.5; 57 | 58 | return self; 59 | } 60 | 61 | #pragma mark - 62 | #pragma mark Accessors 63 | 64 | - (void)setMix:(CGFloat)newValue; 65 | { 66 | _mix = newValue; 67 | 68 | [self setFloat:_mix forUniform:mixUniform program:filterProgram]; 69 | } 70 | 
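// Usage sketch (illustrative; `videoCamera` and `overlayPicture` are assumed, pre-configured
// GPUImageVideoCamera / GPUImagePicture sources, and `previewView` an assumed GPUImageView):
//
//   GPUImageDissolveBlendFilter *dissolveBlendFilter = [[GPUImageDissolveBlendFilter alloc] init];
//   dissolveBlendFilter.mix = 0.3;                  // 70% of the first input, 30% of the second
//   [videoCamera addTarget:dissolveBlendFilter];    // first input (texture location 0)
//   [overlayPicture addTarget:dissolveBlendFilter]; // second input (texture location 1)
//   [dissolveBlendFilter addTarget:previewView];
//   [overlayPicture processImage];
//   [videoCamera startCameraCapture];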
71 | @end 72 | 73 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageDivideBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageDivideBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3ConvolutionFilter.h" 2 | 3 | @interface GPUImageEmbossFilter : GPUImage3x3ConvolutionFilter 4 | 5 | // The strength of the embossing, from 0.0 to 4.0, with 1.0 as the normal level 6 | @property(readwrite, nonatomic) CGFloat intensity; 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageEmbossFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageEmbossFilter.h" 2 | 3 | @implementation GPUImageEmbossFilter 4 | 5 | @synthesize intensity = _intensity; 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [super init])) 10 | { 11 | return nil; 12 | } 13 | 14 | self.intensity = 1.0; 15 | 16 | return self; 17 | } 18 | 19 | #pragma mark - 20 | #pragma mark Accessors 21 | 22 | - (void)setIntensity:(CGFloat)newValue; 23 | { 24 | // [(GPUImage3x3ConvolutionFilter *)filter setConvolutionMatrix:(GPUMatrix3x3){ 25 | // {-2.0f, -1.0f, 0.0f}, 26 | // {-1.0f, 1.0f, 1.0f}, 27 | // { 0.0f, 1.0f, 2.0f} 28 | // }]; 29 | 30 | _intensity = newValue; 31 | 32 | GPUMatrix3x3 newConvolutionMatrix; 33 | newConvolutionMatrix.one.one = _intensity * (-2.0); 34 | newConvolutionMatrix.one.two = -_intensity; 35 | newConvolutionMatrix.one.three = 0.0f; 36 | 37 | newConvolutionMatrix.two.one = -_intensity; 38 | newConvolutionMatrix.two.two = 1.0; 39 | newConvolutionMatrix.two.three = _intensity; 40 | 41 | newConvolutionMatrix.three.one = 0.0f; 42 | newConvolutionMatrix.three.two = _intensity; 43 | newConvolutionMatrix.three.three = _intensity * 2.0; 44 | 45 | self.convolutionKernel = newConvolutionMatrix; 46 | } 47 | 48 | 49 | @end 50 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageErosionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | // For each pixel, this sets it to the minimum value of the red channel in a rectangular neighborhood extending out erosionRadius pixels from the center. 4 | // This extends out dark features, and is most commonly used with black-and-white thresholded images. 5 | 6 | @interface GPUImageErosionFilter : GPUImageTwoPassTextureSamplingFilter 7 | 8 | // Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
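// Usage sketch (illustrative; `thresholdFilter` is an assumed GPUImageLuminanceThresholdFilter
// earlier in the chain, since erosion is normally applied to thresholded imagery):
//
//   GPUImageErosionFilter *erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:2];
//   [thresholdFilter addTarget:erosionFilter];   // shrinks bright regions by roughly 2 px per pass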
9 | - (id)initWithRadius:(NSUInteger)erosionRadius; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageExclusionBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageExclusionBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageExclusionBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageExclusionBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | // Dca = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa) 18 | 19 | gl_FragColor = vec4((overlay.rgb * base.a + base.rgb * overlay.a - 2.0 * overlay.rgb * base.rgb) + overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a), base.a); 20 | } 21 | ); 22 | #else 23 | NSString *const kGPUImageExclusionBlendFragmentShaderString = SHADER_STRING 24 | ( 25 | varying vec2 textureCoordinate; 26 | varying vec2 textureCoordinate2; 27 | 28 | uniform sampler2D inputImageTexture; 29 | uniform sampler2D inputImageTexture2; 30 | 31 | void main() 32 | { 33 | vec4 base = texture2D(inputImageTexture, textureCoordinate); 34 | vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); 35 | 36 | // Dca = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa) 37 | 38 | gl_FragColor = vec4((overlay.rgb * base.a + base.rgb * overlay.a - 2.0 * overlay.rgb * base.rgb) + overlay.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlay.a), base.a); 39 | } 40 | ); 41 | #endif 42 | 43 | @implementation GPUImageExclusionBlendFilter 44 | 45 | - (id)init; 46 | { 47 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageExclusionBlendFragmentShaderString])) 48 | { 49 | return nil; 50 | } 51 | 52 | return self; 53 | } 54 | 55 | @end 56 | 57 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageExposureFilter : GPUImageFilter 4 | { 5 | GLint exposureUniform; 6 | } 7 | 8 | // Exposure ranges from -10.0 to 10.0, with 0.0 as the normal level 9 | @property(readwrite, nonatomic) CGFloat exposure; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageExposureFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageExposureFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform highp float exposure; 10 | 11 | void main() 
12 | { 13 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 14 | 15 | gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w); 16 | } 17 | ); 18 | #else 19 | NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING 20 | ( 21 | varying vec2 textureCoordinate; 22 | 23 | uniform sampler2D inputImageTexture; 24 | uniform float exposure; 25 | 26 | void main() 27 | { 28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 29 | 30 | gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w); 31 | } 32 | ); 33 | #endif 34 | 35 | @implementation GPUImageExposureFilter 36 | 37 | @synthesize exposure = _exposure; 38 | 39 | #pragma mark - 40 | #pragma mark Initialization and teardown 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageExposureFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | exposureUniform = [filterProgram uniformIndex:@"exposure"]; 50 | self.exposure = 0.0; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setExposure:(CGFloat)newValue; 59 | { 60 | _exposure = newValue; 61 | 62 | [self setFloat:_exposure forUniform:exposureUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | 67 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFASTCornerDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageGrayscaleFilter; 4 | @class GPUImage3x3TextureSamplingFilter; 5 | @class GPUImageNonMaximumSuppressionFilter; 6 | 7 | /* 8 | An implementation of the Features from Accelerated Segment Test (FAST) feature detector as described in the following publications: 9 | 10 | E. Rosten and T. Drummond. Fusing points and lines for high performance tracking. IEEE International Conference on Computer Vision, 2005. 11 | E. Rosten and T. Drummond. Machine learning for high-speed corner detection. European Conference on Computer Vision, 2006. 
12 | 13 | For more about the FAST feature detector, see the resources here: 14 | http://www.edwardrosten.com/work/fast.html 15 | */ 16 | 17 | typedef enum { kGPUImageFAST12Contiguous, kGPUImageFAST12ContiguousNonMaximumSuppressed} GPUImageFASTDetectorType; 18 | 19 | @interface GPUImageFASTCornerDetectionFilter : GPUImageFilterGroup 20 | { 21 | GPUImageGrayscaleFilter *luminanceReductionFilter; 22 | GPUImage3x3TextureSamplingFilter *featureDetectionFilter; 23 | GPUImageNonMaximumSuppressionFilter *nonMaximumSuppressionFilter; 24 | // Generate a lookup texture based on the bit patterns 25 | 26 | // Step 1: convert to monochrome if necessary 27 | // Step 2: do a lookup at each pixel based on the Bresenham circle, encode comparison in two color components 28 | // Step 3: do non-maximum suppression of close corner points 29 | } 30 | 31 | - (id)initWithFASTDetectorVariant:(GPUImageFASTDetectorType)detectorType; 32 | 33 | @end 34 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFalseColorFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageFalseColorFilter : GPUImageFilter 4 | { 5 | GLint firstColorUniform, secondColorUniform; 6 | } 7 | 8 | // The first and second colors specify what colors replace the dark and light areas of the image, respectively. The defaults are (0.0, 0.0, 0.5) and (1.0, 0.0, 0.0). 9 | @property(readwrite, nonatomic) GPUVector4 firstColor; 10 | @property(readwrite, nonatomic) GPUVector4 secondColor; 11 | 12 | - (void)setFirstColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 13 | - (void)setSecondColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFilterGroup.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageOutput.h" 2 | #import "GPUImageFilter.h" 3 | 4 | @interface GPUImageFilterGroup : GPUImageOutput <GPUImageInput> 5 | { 6 | NSMutableArray *filters; 7 | BOOL isEndProcessing; 8 | } 9 | 10 | @property(readwrite, nonatomic, strong) GPUImageOutput<GPUImageInput> *terminalFilter; 11 | @property(readwrite, nonatomic, strong) NSArray *initialFilters; 12 | @property(readwrite, nonatomic, strong) GPUImageOutput<GPUImageInput> *inputFilterToIgnoreForUpdates; 13 | 14 | // Filter management 15 | - (void)addFilter:(GPUImageOutput<GPUImageInput> *)newFilter; 16 | - (GPUImageOutput<GPUImageInput> *)filterAtIndex:(NSUInteger)filterIndex; 17 | - (NSUInteger)filterCount; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFilterPipeline.h: -------------------------------------------------------------------------------- 1 | #import <Foundation/Foundation.h> 2 | #import "GPUImageOutput.h" 3 | 4 | @interface GPUImageFilterPipeline : NSObject 5 | { 6 | NSString *stringValue; 7 | } 8 | 9 | @property (strong) NSMutableArray *filters; 10 | 11 | @property (strong) GPUImageOutput *input; 12 | @property (strong) id <GPUImageInput> output; 13 | 14 | - (id) initWithOrderedFilters:(NSArray*) filters input:(GPUImageOutput*)input output:(id <GPUImageInput>)output; 15 | - (id) initWithConfiguration:(NSDictionary*) configuration input:(GPUImageOutput*)input output:(id <GPUImageInput>)output; 16 | - (id) initWithConfigurationFile:(NSURL*) configuration input:(GPUImageOutput*)input output:(id <GPUImageInput>
)output; 17 | 18 | - (void) addFilter:(GPUImageOutput *)filter; 19 | - (void) addFilter:(GPUImageOutput *)filter atIndex:(NSUInteger)insertIndex; 20 | - (void) replaceFilterAtIndex:(NSUInteger)index withFilter:(GPUImageOutput *)filter; 21 | - (void) replaceAllFilters:(NSArray *) newFilters; 22 | - (void) removeFilter:(GPUImageOutput *)filter; 23 | - (void) removeFilterAtIndex:(NSUInteger)index; 24 | - (void) removeAllFilters; 25 | 26 | - (UIImage *) currentFilteredFrame; 27 | - (UIImage *) currentFilteredFrameWithOrientation:(UIImageOrientation)imageOrientation; 28 | - (CGImageRef) newCGImageFromCurrentFilteredFrame; 29 | 30 | @end 31 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFourInputFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageThreeInputFilter.h" 2 | 3 | extern NSString *const kGPUImageFourInputTextureVertexShaderString; 4 | 5 | @interface GPUImageFourInputFilter : GPUImageThreeInputFilter 6 | { 7 | GPUImageFramebuffer *fourthInputFramebuffer; 8 | 9 | GLint filterFourthTextureCoordinateAttribute; 10 | GLint filterInputTextureUniform4; 11 | GPUImageRotationMode inputRotation4; 12 | GLuint filterSourceTexture4; 13 | CMTime fourthFrameTime; 14 | 15 | BOOL hasSetThirdTexture, hasReceivedFourthFrame, fourthFrameWasVideo; 16 | BOOL fourthFrameCheckDisabled; 17 | } 18 | 19 | - (void)disableFourthFrameCheck; 20 | 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFramebuffer.h: -------------------------------------------------------------------------------- 1 | #import 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | #import 5 | #import 6 | #import 7 | #else 8 | #import 9 | #import 10 | #endif 11 | 12 | #import 13 | #import 14 | 15 | 16 | typedef struct GPUTextureOptions { 17 | GLenum minFilter; 18 | GLenum magFilter; 19 | GLenum wrapS; 20 | GLenum wrapT; 21 | GLenum internalFormat; 22 | GLenum format; 23 | GLenum type; 24 | } GPUTextureOptions; 25 | 26 | @interface GPUImageFramebuffer : NSObject 27 | 28 | @property(readonly) CGSize size; 29 | @property(readonly) GPUTextureOptions textureOptions; 30 | @property(readonly) GLuint texture; 31 | @property(readonly) BOOL missingFramebuffer; 32 | 33 | // Initialization and teardown 34 | - (id)initWithSize:(CGSize)framebufferSize; 35 | - (id)initWithSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)fboTextureOptions onlyTexture:(BOOL)onlyGenerateTexture; 36 | - (id)initWithSize:(CGSize)framebufferSize overriddenTexture:(GLuint)inputTexture; 37 | 38 | // Usage 39 | - (void)activateFramebuffer; 40 | 41 | // Reference counting 42 | - (void)lock; 43 | - (void)unlock; 44 | - (void)clearAllLocks; 45 | - (void)disableReferenceCounting; 46 | - (void)enableReferenceCounting; 47 | 48 | // Image capture 49 | - (CGImageRef)newCGImageFromFramebufferContents; 50 | - (void)restoreRenderTarget; 51 | 52 | // Raw data bytes 53 | - (void)lockForReading; 54 | - (void)unlockAfterReading; 55 | - (NSUInteger)bytesPerRow; 56 | - (GLubyte *)byteBuffer; 57 | - (CVPixelBufferRef)pixelBuffer; 58 | 59 | @end 60 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageFramebufferCache.h: -------------------------------------------------------------------------------- 1 | #import 2 | #import 3 | #import "GPUImageFramebuffer.h" 4 | 
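// Usage sketch (illustrative): framebuffers are normally obtained from the shared cache on
// GPUImageContext rather than allocated directly, and must stay locked while they are in use:
//
//   GPUImageFramebuffer *framebuffer =
//       [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(640.0, 480.0)
//                                                               onlyTexture:NO];
//   [framebuffer activateFramebuffer];   // bind as the current render target
//   // ... render into it ...
//   [framebuffer unlock];                // hand it back to the cache once rendering is done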
5 | @interface GPUImageFramebufferCache : NSObject 6 | 7 | // Framebuffer management 8 | - (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture; 9 | - (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize onlyTexture:(BOOL)onlyTexture; 10 | - (void)returnFramebufferToCache:(GPUImageFramebuffer *)framebuffer; 11 | - (void)purgeAllUnassignedFramebuffers; 12 | - (void)addFramebufferToActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; 13 | - (void)removeFramebufferFromActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageGammaFilter : GPUImageFilter 4 | { 5 | GLint gammaUniform; 6 | } 7 | 8 | // Gamma ranges from 0.0 to 3.0, with 1.0 as the normal level 9 | @property(readwrite, nonatomic) CGFloat gamma; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGammaFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageGammaFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform lowp float gamma; 10 | 11 | void main() 12 | { 13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 14 | 15 | gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w); 16 | } 17 | ); 18 | #else 19 | NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING 20 | ( 21 | varying vec2 textureCoordinate; 22 | 23 | uniform sampler2D inputImageTexture; 24 | uniform float gamma; 25 | 26 | void main() 27 | { 28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 29 | 30 | gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w); 31 | } 32 | ); 33 | #endif 34 | 35 | @implementation GPUImageGammaFilter 36 | 37 | @synthesize gamma = _gamma; 38 | 39 | #pragma mark - 40 | #pragma mark Initialization and teardown 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageGammaFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | gammaUniform = [filterProgram uniformIndex:@"gamma"]; 50 | self.gamma = 1.0; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setGamma:(CGFloat)newValue; 59 | { 60 | _gamma = newValue; 61 | 62 | [self setFloat:_gamma forUniform:gammaUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | 67 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | /** A Gaussian blur filter 4 | Interpolated optimization based on Daniel Rákos' work at http://rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/ 5 | */ 6 | 7 | @interface GPUImageGaussianBlurFilter : GPUImageTwoPassTextureSamplingFilter 8 | { 9 | BOOL 
shouldResizeBlurRadiusWithImageSize; 10 | CGFloat _blurRadiusInPixels; 11 | } 12 | 13 | /** A multiplier for the spacing between texels, ranging from 0.0 on up, with a default of 1.0. Adjusting this may slightly increase the blur strength, but will introduce artifacts in the result. 14 | */ 15 | @property (readwrite, nonatomic) CGFloat texelSpacingMultiplier; 16 | 17 | /** A radius in pixels to use for the blur, with a default of 2.0. This adjusts the sigma variable in the Gaussian distribution function. 18 | */ 19 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 20 | 21 | /** Setting these properties will allow the blur radius to scale with the size of the image. These properties are mutually exclusive; setting either will set the other to 0. 22 | */ 23 | @property (readwrite, nonatomic) CGFloat blurRadiusAsFractionOfImageWidth; 24 | @property (readwrite, nonatomic) CGFloat blurRadiusAsFractionOfImageHeight; 25 | 26 | /// The number of times to sequentially blur the incoming image. The more passes, the slower the filter. 27 | @property(readwrite, nonatomic) NSUInteger blurPasses; 28 | 29 | + (NSString *)vertexShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; 30 | + (NSString *)fragmentShaderForStandardBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; 31 | + (NSString *)vertexShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; 32 | + (NSString *)fragmentShaderForOptimizedBlurOfRadius:(NSUInteger)blurRadius sigma:(CGFloat)sigma; 33 | 34 | - (void)switchToVertexShader:(NSString *)newVertexShader fragmentShader:(NSString *)newFragmentShader; 35 | 36 | @end 37 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGaussianBlurPositionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | /** A more generalized 9x9 Gaussian blur filter 4 | */ 5 | @interface GPUImageGaussianBlurPositionFilter : GPUImageTwoPassTextureSamplingFilter 6 | { 7 | GLint blurCenterUniform, blurRadiusUniform, aspectRatioUniform; 8 | } 9 | 10 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 11 | */ 12 | @property (readwrite, nonatomic) CGFloat blurSize; 13 | 14 | /** Center for the blur, defaults to 0.5, 0.5 15 | */ 16 | @property (readwrite, nonatomic) CGPoint blurCenter; 17 | 18 | /** Radius for the blur, defaults to 1.0 19 | */ 20 | @property (readwrite, nonatomic) CGFloat blurRadius; 21 | 22 | @end 23 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGaussianSelectiveBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageGaussianBlurFilter; 4 | 5 | /** A Gaussian blur that preserves focus within a circular region 6 | */ 7 | @interface GPUImageGaussianSelectiveBlurFilter : GPUImageFilterGroup 8 | { 9 | GPUImageGaussianBlurFilter *blurFilter; 10 | GPUImageFilter *selectiveFocusFilter; 11 | BOOL hasOverriddenAspectRatio; 12 | } 13 | 14 | /** The radius of the circular area being excluded from the blur 15 | */ 16 | @property (readwrite, nonatomic) CGFloat excludeCircleRadius; 17 | /** The center of the circular area being excluded from the blur 18 | */ 19 | @property (readwrite, nonatomic) CGPoint excludeCirclePoint; 20 | /** The size of the area between the 
blurred portion and the clear circle 21 | */ 22 | @property (readwrite, nonatomic) CGFloat excludeBlurSize; 23 | /** A radius in pixels to use for the blur, with a default of 5.0. This adjusts the sigma variable in the Gaussian distribution function. 24 | */ 25 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 26 | /** The aspect ratio of the image, used to adjust the circularity of the in-focus region. By default, this matches the image aspect ratio, but you can override this value. 27 | */ 28 | @property (readwrite, nonatomic) CGFloat aspectRatio; 29 | 30 | @end 31 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGlassSphereFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageSphereRefractionFilter.h" 2 | 3 | @interface GPUImageGlassSphereFilter : GPUImageSphereRefractionFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageGrayscaleFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | extern NSString *const kGPUImageLuminanceFragmentShaderString; 4 | 5 | /** Converts an image to grayscale (a slightly faster implementation of the saturation filter, without the ability to vary the color contribution) 6 | */ 7 | @interface GPUImageGrayscaleFilter : GPUImageFilter 8 | 9 | @end 10 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHSBFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageColorMatrixFilter.h" 2 | 3 | @interface GPUImageHSBFilter : GPUImageColorMatrixFilter 4 | 5 | /** Reset the filter to have no transformations. 6 | */ 7 | - (void)reset; 8 | 9 | /** Add a hue rotation to the filter. 10 | The hue rotation is in the range [-360, 360] with 0 being no-change. 11 | Note that this adjustment is additive, so use the reset method if you need to. 12 | */ 13 | - (void)rotateHue:(float)h; 14 | 15 | /** Add a saturation adjustment to the filter. 16 | The saturation adjustment is in the range [0.0, 2.0] with 1.0 being no-change. 17 | Note that this adjustment is additive, so use the reset method if you need to. 18 | */ 19 | - (void)adjustSaturation:(float)s; 20 | 21 | /** Add a brightness adjustment to the filter. 22 | The brightness adjustment is in the range [0.0, 2.0] with 1.0 being no-change. 23 | Note that this adjustment is additive, so use the reset method if you need to. 
24 | */ 25 | - (void)adjustBrightness:(float)b; 26 | 27 | @end 28 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHalftoneFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImagePixellateFilter.h" 2 | 3 | @interface GPUImageHalftoneFilter : GPUImagePixellateFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHardLightBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageHardLightBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHazeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /* 4 | * The haze filter can be used to add or remove haze (similar to a UV filter) 5 | * 6 | * @author Alaric Cole 7 | * @creationDate 03/10/12 8 | * 9 | */ 10 | 11 | /** The haze filter can be used to add or remove haze 12 | 13 | This is similar to a UV filter 14 | */ 15 | @interface GPUImageHazeFilter : GPUImageFilter 16 | { 17 | GLint distanceUniform; 18 | GLint slopeUniform; 19 | } 20 | 21 | /** Strength of the color applied. Default 0. Values between -.3 and .3 are best 22 | */ 23 | @property(readwrite, nonatomic) CGFloat distance; 24 | 25 | /** Amount of color change. Default 0. Values between -.3 and .3 are best 26 | */ 27 | @property(readwrite, nonatomic) CGFloat slope; 28 | 29 | @end 30 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | #import "GPUImageLowPassFilter.h" 3 | #import "GPUImageDifferenceBlendFilter.h" 4 | 5 | @interface GPUImageHighPassFilter : GPUImageFilterGroup 6 | { 7 | GPUImageLowPassFilter *lowPassFilter; 8 | GPUImageDifferenceBlendFilter *differenceBlendFilter; 9 | } 10 | 11 | // This controls the degree by which the previous accumulated frames are blended and then subtracted from the current one. This ranges from 0.0 to 1.0, with a default of 0.5. 
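// Usage sketch (illustrative; `videoCamera` is an assumed GPUImageVideoCamera source). A temporal
// high pass keeps only what changes between frames, which makes it a building block for simple
// motion detection:
//
//   GPUImageHighPassFilter *highPassFilter = [[GPUImageHighPassFilter alloc] init];
//   highPassFilter.filterStrength = 0.5;   // how strongly the accumulated average is subtracted
//   [videoCamera addTarget:highPassFilter];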
12 | @property(readwrite, nonatomic) CGFloat filterStrength; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHighPassFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageHighPassFilter.h" 2 | 3 | @implementation GPUImageHighPassFilter 4 | 5 | @synthesize filterStrength; 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [super init])) 10 | { 11 | return nil; 12 | } 13 | 14 | // Start with a low pass filter to define the component to be removed 15 | lowPassFilter = [[GPUImageLowPassFilter alloc] init]; 16 | [self addFilter:lowPassFilter]; 17 | 18 | // Take the difference of the current frame from the low pass filtered result to get the high pass 19 | differenceBlendFilter = [[GPUImageDifferenceBlendFilter alloc] init]; 20 | [self addFilter:differenceBlendFilter]; 21 | 22 | // Texture location 0 needs to be the original image for the difference blend 23 | [lowPassFilter addTarget:differenceBlendFilter atTextureLocation:1]; 24 | 25 | self.initialFilters = [NSArray arrayWithObjects:lowPassFilter, differenceBlendFilter, nil]; 26 | self.terminalFilter = differenceBlendFilter; 27 | 28 | self.filterStrength = 0.5; 29 | 30 | return self; 31 | } 32 | 33 | #pragma mark - 34 | #pragma mark Accessors 35 | 36 | - (void)setFilterStrength:(CGFloat)newValue; 37 | { 38 | lowPassFilter.filterStrength = newValue; 39 | } 40 | 41 | - (CGFloat)filterStrength; 42 | { 43 | return lowPassFilter.filterStrength; 44 | } 45 | 46 | @end 47 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageHighlightShadowFilter : GPUImageFilter 4 | { 5 | GLint shadowsUniform, highlightsUniform; 6 | } 7 | 8 | /** 9 | * 0 - 1, increase to lighten shadows. 10 | * @default 0 11 | */ 12 | @property(readwrite, nonatomic) CGFloat shadows; 13 | 14 | /** 15 | * 0 - 1, decrease to darken highlights. 16 | * @default 1 17 | */ 18 | @property(readwrite, nonatomic) CGFloat highlights; 19 | 20 | @end 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHighlightShadowTintFilter.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImageHighlightShadowTintFilter.h 3 | // 4 | // 5 | // Created by github.com/r3mus on 8/14/15. 6 | // 7 | // 8 | 9 | #import "GPUImageFilter.h" 10 | 11 | @interface GPUImageHighlightShadowTintFilter : GPUImageFilter 12 | { 13 | GLint shadowTintIntensityUniform, highlightTintIntensityUniform, shadowTintColorUniform, highlightTintColorUniform; 14 | } 15 | 16 | // The shadowTint and highlightTint colors specify what colors replace the dark and light areas of the image, respectively. The defaults are black for shadows and white for highlights.
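// Usage sketch (illustrative values only): tint shadows toward blue and highlights toward a warm yellow.
//
//   GPUImageHighlightShadowTintFilter *tintFilter = [[GPUImageHighlightShadowTintFilter alloc] init];
//   [tintFilter setShadowTintColorRed:0.0 green:0.0 blue:1.0 alpha:1.0];
//   [tintFilter setHighlightTintColorRed:1.0 green:0.9 blue:0.6 alpha:1.0];
//   tintFilter.shadowTintIntensity = 0.5;
//   tintFilter.highlightTintIntensity = 0.5;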
17 | @property(readwrite, nonatomic) GLfloat shadowTintIntensity; 18 | @property(readwrite, nonatomic) GPUVector4 shadowTintColor; 19 | @property(readwrite, nonatomic) GLfloat highlightTintIntensity; 20 | @property(readwrite, nonatomic) GPUVector4 highlightTintColor; 21 | 22 | - (void)setShadowTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; 23 | - (void)setHighlightTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; 24 | 25 | @end 26 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHistogramEqualizationFilter.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImageHistogramEqualizationFilter.h 3 | // FilterShowcase 4 | // 5 | // Created by Adam Marcus on 19/08/2014. 6 | // Copyright (c) 2014 Sunset Lake Software LLC. All rights reserved. 7 | // 8 | 9 | #import "GPUImageFilterGroup.h" 10 | #import "GPUImageHistogramFilter.h" 11 | #import "GPUImageRawDataOutput.h" 12 | #import "GPUImageRawDataInput.h" 13 | #import "GPUImageTwoInputFilter.h" 14 | 15 | @interface GPUImageHistogramEqualizationFilter : GPUImageFilterGroup 16 | { 17 | GPUImageHistogramFilter *histogramFilter; 18 | GPUImageRawDataOutput *rawDataOutputFilter; 19 | GPUImageRawDataInput *rawDataInputFilter; 20 | } 21 | 22 | @property(readwrite, nonatomic) NSUInteger downsamplingFactor; 23 | 24 | - (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType; 25 | 26 | @end 27 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHistogramFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | typedef enum { kGPUImageHistogramRed, kGPUImageHistogramGreen, kGPUImageHistogramBlue, kGPUImageHistogramRGB, kGPUImageHistogramLuminance} GPUImageHistogramType; 4 | 5 | @interface GPUImageHistogramFilter : GPUImageFilter 6 | { 7 | GPUImageHistogramType histogramType; 8 | 9 | GLubyte *vertexSamplingCoordinates; 10 | 11 | GLProgram *secondFilterProgram, *thirdFilterProgram; 12 | GLint secondFilterPositionAttribute, thirdFilterPositionAttribute; 13 | } 14 | 15 | // Rather than sampling every pixel, this dictates what fraction of the image is sampled. By default, this is 16 with a minimum of 1. 
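// Usage sketch (illustrative): the histogram output is a narrow texture of bin counts, so it is
// usually visualized by chaining it into a GPUImageHistogramGenerator:
//
//   GPUImageHistogramFilter *histogramFilter =
//       [[GPUImageHistogramFilter alloc] initWithHistogramType:kGPUImageHistogramLuminance];
//   histogramFilter.downsamplingFactor = 16;   // sample every 16th pixel (the default)
//   GPUImageHistogramGenerator *histogramGraph = [[GPUImageHistogramGenerator alloc] init];
//   [histogramFilter addTarget:histogramGraph];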
16 | @property(readwrite, nonatomic) NSUInteger downsamplingFactor; 17 | 18 | // Initialization and teardown 19 | - (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType; 20 | - (void)initializeSecondaryAttributes; 21 | 22 | @end 23 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHistogramGenerator.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageHistogramGenerator : GPUImageFilter 4 | { 5 | GLint backgroundColorUniform; 6 | } 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHueBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageHueBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageHueFilter.h: -------------------------------------------------------------------------------- 1 | 2 | #import "GPUImageFilter.h" 3 | 4 | @interface GPUImageHueFilter : GPUImageFilter 5 | { 6 | GLint hueAdjustUniform; 7 | 8 | } 9 | @property (nonatomic, readwrite) CGFloat hue; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageJFAVoronoiFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageJFAVoronoiFilter : GPUImageFilter 4 | { 5 | GLuint secondFilterOutputTexture; 6 | GLuint secondFilterFramebuffer; 7 | 8 | 9 | GLint sampleStepUniform; 10 | GLint sizeUniform; 11 | NSUInteger numPasses; 12 | 13 | } 14 | 15 | @property (nonatomic, readwrite) CGSize sizeInPixels; 16 | 17 | @end -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Kuwahara image abstraction, drawn from the work of Kyprianidis, et. al. in their publication "Anisotropic Kuwahara Filtering on the GPU" within the GPU Pro collection. This produces an oil-painting-like image, but it is extremely computationally expensive, so it can take seconds to render a frame on an iPad 2. This might be best used for still images. 4 | */ 5 | @interface GPUImageKuwaharaFilter : GPUImageFilter 6 | { 7 | GLint radiusUniform; 8 | } 9 | 10 | /// The radius to sample from when creating the brush-stroke effect, with a default of 3. The larger the radius, the slower the filter. 
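// Usage sketch (illustrative; `stillImageSource` is an assumed GPUImagePicture). Given the cost
// noted above, this filter is best reserved for still images rather than live video:
//
//   GPUImageKuwaharaFilter *kuwaharaFilter = [[GPUImageKuwaharaFilter alloc] init];
//   kuwaharaFilter.radius = 4;   // broader brush strokes, noticeably slower than the default 3
//   [stillImageSource addTarget:kuwaharaFilter];
//   [kuwaharaFilter useNextFrameForImageCapture];
//   [stillImageSource processImage];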
11 | @property(readwrite, nonatomic) NSUInteger radius; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageKuwaharaRadius3Filter.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImageKuwaharaRadius3Filter.h 3 | 4 | #import "GPUImageFilter.h" 5 | 6 | @interface GPUImageKuwaharaRadius3Filter : GPUImageFilter 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLanczosResamplingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | @interface GPUImageLanczosResamplingFilter : GPUImageTwoPassTextureSamplingFilter 4 | 5 | @property(readwrite, nonatomic) CGSize originalImageSize; 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLaplacianFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3ConvolutionFilter.h" 2 | 3 | @interface GPUImageLaplacianFilter : GPUImage3x3ConvolutionFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLevelsFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** 4 | * Levels like Photoshop. 5 | * 6 | * The min, max, minOut and maxOut parameters are floats in the range [0, 1]. 7 | * If you have parameters from Photoshop in the range [0, 255] you must first 8 | * convert them to be [0, 1]. 9 | * The gamma/mid parameter is a float >= 0. This matches the value from Photoshop. 10 | * 11 | * If you want to apply levels to RGB as well as individual channels you need to use 12 | * this filter twice - first for the individual channels and then for all channels. 
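 * For example (illustrative), Photoshop levels of min 30, gamma 1.2, max 220 with the full
 * output range map to:
 *
 *     [levelsFilter setMin:(30.0 / 255.0) gamma:1.2 max:(220.0 / 255.0) minOut:0.0 maxOut:1.0];
 *
 * i.e. min becomes about 0.118 and max about 0.863 after normalization, while the gamma/mid
 * value of 1.2 is passed through unchanged.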
13 | */ 14 | @interface GPUImageLevelsFilter : GPUImageFilter 15 | { 16 | GLint minUniform; 17 | GLint midUniform; 18 | GLint maxUniform; 19 | GLint minOutputUniform; 20 | GLint maxOutputUniform; 21 | 22 | GPUVector3 minVector, midVector, maxVector, minOutputVector, maxOutputVector; 23 | } 24 | 25 | /** Set levels for the red channel */ 26 | - (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; 27 | 28 | - (void)setRedMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; 29 | 30 | /** Set levels for the green channel */ 31 | - (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; 32 | 33 | - (void)setGreenMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; 34 | 35 | /** Set levels for the blue channel */ 36 | - (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; 37 | 38 | - (void)setBlueMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; 39 | 40 | /** Set levels for all channels at once */ 41 | - (void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max minOut:(CGFloat)minOut maxOut:(CGFloat)maxOut; 42 | - (void)setMin:(CGFloat)min gamma:(CGFloat)mid max:(CGFloat)max; 43 | 44 | @end 45 | 46 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | /// Blends two images by taking the maximum value of each color component between the images 4 | @interface GPUImageLightenBlendFilter : GPUImageTwoInputFilter 5 | { 6 | } 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLightenBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageLightenBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | gl_FragColor = max(textureColor, textureColor2); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 33 | 34 | gl_FragColor = max(textureColor, textureColor2); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageLightenBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageLightenBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLineGenerator.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageLineGenerator : GPUImageFilter 4 | { 5 | GLint lineWidthUniform, lineColorUniform; 6 | GLfloat *lineCoordinates; 7 | } 8 | 9 | // The width of the displayed lines, in pixels. The default is 1. 10 | @property(readwrite, nonatomic) CGFloat lineWidth; 11 | 12 | // The color of the lines is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0). 13 | - (void)setLineColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 14 | 15 | // Rendering 16 | - (void)renderLinesFromArray:(GLfloat *)lineSlopeAndIntercepts count:(NSUInteger)numberOfLines frameTime:(CMTime)frameTime; 17 | 18 | @end 19 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageLinearBurnBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLinearBurnBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageLinearBurnBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), vec3(0.0), vec3(1.0)), textureColor.a); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 33 | 34 | gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), vec3(0.0), vec3(1.0)), textureColor.a); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageLinearBurnBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageLinearBurnBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLocalBinaryPatternFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- 
/ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLookupFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageLookupFilter : GPUImageTwoInputFilter 4 | { 5 | GLint intensityUniform; 6 | } 7 | 8 | // How To Use: 9 | // 1) Use your favourite photo editing application to apply a filter to lookup.png from GPUImage/framework/Resources. 10 | // For this to work properly each pixel color must not depend on other pixels (e.g. blur will not work). 11 | // If you need a more complex filter, you can create as many lookup tables as required. 12 | // E.g. color_balance_lookup_1.png -> GPUImageGaussianBlurFilter -> color_balance_lookup_2.png 13 | // 2) Use your new lookup.png file as a second input for GPUImageLookupFilter. 14 | 15 | // See GPUImageAmatorkaFilter, GPUImageMissEtikateFilter, and GPUImageSoftEleganceFilter for examples. 16 | 17 | // Additional Info: 18 | // The lookup texture is organised as 8x8 quads of 64x64 pixels representing all possible RGB colors: 19 | //for (int by = 0; by < 8; by++) { 20 | // for (int bx = 0; bx < 8; bx++) { 21 | // for (int g = 0; g < 64; g++) { 22 | // for (int r = 0; r < 64; r++) { 23 | // image.setPixel(r + bx * 64, g + by * 64, qRgb((int)(r * 255.0 / 63.0 + 0.5), 24 | // (int)(g * 255.0 / 63.0 + 0.5), 25 | // (int)((bx + by * 8.0) * 255.0 / 63.0 + 0.5))); 26 | // } 27 | // } 28 | // } 29 | //} 30 | 31 | // Opacity/intensity of lookup filter ranges from 0.0 to 1.0, with 1.0 as the normal setting 32 | @property(readwrite, nonatomic) CGFloat intensity; 33 | 34 | @end 35 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | #import "GPUImageBuffer.h" 3 | #import "GPUImageDissolveBlendFilter.h" 4 | 5 | @interface GPUImageLowPassFilter : GPUImageFilterGroup 6 | { 7 | GPUImageBuffer *bufferFilter; 8 | GPUImageDissolveBlendFilter *dissolveBlendFilter; 9 | } 10 | 11 | // This controls the degree by which the previous accumulated frames are blended with the current one. This ranges from 0.0 to 1.0, with a default of 0.5.
12 | @property(readwrite, nonatomic) CGFloat filterStrength; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLowPassFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageLowPassFilter.h" 2 | 3 | @implementation GPUImageLowPassFilter 4 | 5 | @synthesize filterStrength; 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [super init])) 10 | { 11 | return nil; 12 | } 13 | 14 | // Take in the frame and blend it with the previous one 15 | dissolveBlendFilter = [[GPUImageDissolveBlendFilter alloc] init]; 16 | [self addFilter:dissolveBlendFilter]; 17 | 18 | // Buffer the result to be fed back into the blend 19 | bufferFilter = [[GPUImageBuffer alloc] init]; 20 | [self addFilter:bufferFilter]; 21 | 22 | // Texture location 0 needs to be the original image for the dissolve blend 23 | [bufferFilter addTarget:dissolveBlendFilter atTextureLocation:1]; 24 | [dissolveBlendFilter addTarget:bufferFilter]; 25 | 26 | [dissolveBlendFilter disableSecondFrameCheck]; 27 | 28 | // To prevent double updating of this filter, disable updates from the sharp image side 29 | // self.inputFilterToIgnoreForUpdates = unsharpMaskFilter; 30 | 31 | self.initialFilters = [NSArray arrayWithObject:dissolveBlendFilter]; 32 | self.terminalFilter = dissolveBlendFilter; 33 | 34 | self.filterStrength = 0.5; 35 | 36 | return self; 37 | } 38 | 39 | #pragma mark - 40 | #pragma mark Accessors 41 | 42 | - (void)setFilterStrength:(CGFloat)newValue; 43 | { 44 | dissolveBlendFilter.mix = newValue; 45 | } 46 | 47 | - (CGFloat)filterStrength; 48 | { 49 | return dissolveBlendFilter.mix; 50 | } 51 | 52 | - (void)addTarget:(id)newTarget atTextureLocation:(NSInteger)textureLocation; 53 | { 54 | [self.terminalFilter addTarget:newTarget atTextureLocation:textureLocation]; 55 | // If this group is used in a GPUImageFilterPipeline, removeAllTargets may be called on self.terminalFilter, so bufferFilter has to be re-added as a target here 56 | if (self.terminalFilter == dissolveBlendFilter && ![self.terminalFilter.targets containsObject:bufferFilter]) { 57 | [self.terminalFilter addTarget:bufferFilter atTextureLocation:1]; 58 | } 59 | } 60 | 61 | @end 62 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceRangeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageLuminanceRangeFilter : GPUImageFilter 4 | { 5 | GLint rangeReductionUniform; 6 | } 7 | 8 | /** The degree to reduce the luminance range, from 0.0 to 1.0. Default is 0.6. 9 | */ 10 | @property(readwrite, nonatomic) CGFloat rangeReductionFactor; 11 | 12 | @end 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Pixels with a luminance above the threshold will appear white, and those below will be black 4 | */ 5 | @interface GPUImageLuminanceThresholdFilter : GPUImageFilter 6 | { 7 | GLint thresholdUniform; 8 | } 9 | 10 | /** Anything above this luminance will be white, and anything below black.
Ranges from 0.0 to 1.0, with 0.5 as the default 11 | */ 12 | @property(readwrite, nonatomic) CGFloat threshold; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLuminanceThresholdFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageLuminanceThresholdFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageLuminanceThresholdFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform highp float threshold; 10 | 11 | const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); 12 | 13 | void main() 14 | { 15 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 16 | highp float luminance = dot(textureColor.rgb, W); 17 | highp float thresholdResult = step(threshold, luminance); 18 | 19 | gl_FragColor = vec4(vec3(thresholdResult), textureColor.w); 20 | } 21 | ); 22 | #else 23 | NSString *const kGPUImageLuminanceThresholdFragmentShaderString = SHADER_STRING 24 | ( 25 | varying vec2 textureCoordinate; 26 | 27 | uniform sampler2D inputImageTexture; 28 | uniform float threshold; 29 | 30 | const vec3 W = vec3(0.2125, 0.7154, 0.0721); 31 | 32 | void main() 33 | { 34 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 35 | float luminance = dot(textureColor.rgb, W); 36 | float thresholdResult = step(threshold, luminance); 37 | 38 | gl_FragColor = vec4(vec3(thresholdResult), textureColor.w); 39 | } 40 | ); 41 | #endif 42 | 43 | @implementation GPUImageLuminanceThresholdFilter 44 | 45 | @synthesize threshold = _threshold; 46 | 47 | #pragma mark - 48 | #pragma mark Initialization 49 | 50 | - (id)init; 51 | { 52 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageLuminanceThresholdFragmentShaderString])) 53 | { 54 | return nil; 55 | } 56 | 57 | thresholdUniform = [filterProgram uniformIndex:@"threshold"]; 58 | self.threshold = 0.5; 59 | 60 | return self; 61 | } 62 | 63 | #pragma mark - 64 | #pragma mark Accessors 65 | 66 | - (void)setThreshold:(CGFloat)newValue; 67 | { 68 | _threshold = newValue; 69 | 70 | [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; 71 | } 72 | 73 | @end 74 | 75 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLuminosity.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageAverageColor.h" 2 | 3 | @interface GPUImageLuminosity : GPUImageAverageColor 4 | { 5 | GLProgram *secondFilterProgram; 6 | GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute; 7 | GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2; 8 | GLint secondFilterTexelWidthUniform, secondFilterTexelHeightUniform; 9 | } 10 | 11 | // This block is called on the completion of color averaging for a frame 12 | @property(nonatomic, copy) void(^luminosityProcessingFinishedBlock)(CGFloat luminosity, CMTime frameTime); 13 | 14 | - (void)extractLuminosityAtFrameTime:(CMTime)frameTime; 15 | - (void)initializeSecondaryAttributes; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageLuminosityBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import 
"GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageLuminosityBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMaskFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageMaskFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMedianFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageMedianFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImagePicture; 4 | 5 | /** A photo filter based on Photoshop action by Miss Etikate: 6 | http://miss-etikate.deviantart.com/art/Photoshop-Action-15-120151961 7 | */ 8 | 9 | // Note: If you want to use this effect you have to add lookup_miss_etikate.png 10 | // from Resources folder to your application bundle. 11 | 12 | @interface GPUImageMissEtikateFilter : GPUImageFilterGroup 13 | { 14 | GPUImagePicture *lookupImageSource; 15 | } 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMissEtikateFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageMissEtikateFilter.h" 2 | #import "GPUImagePicture.h" 3 | #import "GPUImageLookupFilter.h" 4 | 5 | @implementation GPUImageMissEtikateFilter 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [super init])) 10 | { 11 | return nil; 12 | } 13 | 14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 15 | UIImage *image = [UIImage imageNamed:@"lookup_miss_etikate.png"]; 16 | #else 17 | NSImage *image = [NSImage imageNamed:@"lookup_miss_etikate.png"]; 18 | #endif 19 | 20 | NSAssert(image, @"To use GPUImageMissEtikateFilter you need to add lookup_miss_etikate.png from GPUImage/framework/Resources to your application bundle."); 21 | 22 | lookupImageSource = [[GPUImagePicture alloc] initWithImage:image]; 23 | GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init]; 24 | [self addFilter:lookupFilter]; 25 | 26 | [lookupImageSource addTarget:lookupFilter atTextureLocation:1]; 27 | [lookupImageSource processImage]; 28 | 29 | self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil]; 30 | self.terminalFilter = lookupFilter; 31 | 32 | return self; 33 | } 34 | 35 | #pragma mark - 36 | #pragma mark Accessors 37 | 38 | @end 39 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMonochromeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageMonochromeFilter : GPUImageFilter 4 | { 5 | GLint intensityUniform, filterColorUniform; 6 | } 7 | 8 | @property(readwrite, nonatomic) CGFloat intensity; 9 | @property(readwrite, nonatomic) GPUVector4 color; 10 | 11 | - (void)setColorRed:(GLfloat)redComponent 
green:(GLfloat)greenComponent blue:(GLfloat)blueComponent; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMosaicFilter.h: -------------------------------------------------------------------------------- 1 | 2 | // This needs a little more work, it's rotating the input tileset and there are some artifacts (I think from GL_LINEAR interpolation), but it's working 3 | 4 | #import "GPUImageTwoInputFilter.h" 5 | #import "GPUImagePicture.h" 6 | 7 | @interface GPUImageMosaicFilter : GPUImageTwoInputFilter { 8 | GLint inputTileSizeUniform, numTilesUniform, displayTileSizeUniform, colorOnUniform; 9 | GPUImagePicture *pic; 10 | } 11 | 12 | // This filter takes an input tileset, the tiles must ascend in luminance 13 | // It looks at the input image and replaces each display tile with an input tile 14 | // according to the luminance of that tile. The idea was to replicate the ASCII 15 | // video filters seen in other apps, but the tileset can be anything. 16 | @property(readwrite, nonatomic) CGSize inputTileSize; 17 | @property(readwrite, nonatomic) float numTiles; 18 | @property(readwrite, nonatomic) CGSize displayTileSize; 19 | @property(readwrite, nonatomic) BOOL colorOn; 20 | @property(readwrite, nonatomic, copy) NSString *tileSet; 21 | 22 | @end 23 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMotionBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageMotionBlurFilter : GPUImageFilter 4 | 5 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 6 | */ 7 | @property (readwrite, nonatomic) CGFloat blurSize; 8 | 9 | /** The angular direction of the blur, in degrees. 0 degrees by default 10 | */ 11 | @property (readwrite, nonatomic) CGFloat blurAngle; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMotionDetector.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | #import "GPUImageLowPassFilter.h" 3 | #import "GPUImageAverageColor.h" 4 | 5 | @interface GPUImageMotionDetector : GPUImageFilterGroup 6 | { 7 | GPUImageLowPassFilter *lowPassFilter; 8 | GPUImageTwoInputFilter *frameComparisonFilter; 9 | GPUImageAverageColor *averageColor; 10 | } 11 | 12 | // This controls the low pass filter strength used to compare the current frame with previous ones to detect motion. This ranges from 0.0 to 1.0, with a default of 0.5. 13 | @property(readwrite, nonatomic) CGFloat lowPassFilterStrength; 14 | 15 | // For every frame, this will feed back the calculated centroid of the motion, as well as a relative intensity. 16 | @property(nonatomic, copy) void(^motionDetectionBlock)(CGPoint motionCentroid, CGFloat motionIntensity, CMTime frameTime); 17 | 18 | @end 19 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMovieComposition.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImageMovieComposition.h 3 | // Givit 4 | // 5 | // Created by Sean Meiners on 2013/01/25. 
6 | // 7 | // 8 | 9 | #import "GPUImageMovie.h" 10 | 11 | @interface GPUImageMovieComposition : GPUImageMovie 12 | 13 | @property (readwrite, retain) AVComposition *compositon; 14 | @property (readwrite, retain) AVVideoComposition *videoComposition; 15 | @property (readwrite, retain) AVAudioMix *audioMix; 16 | 17 | - (id)initWithComposition:(AVComposition*)compositon 18 | andVideoComposition:(AVVideoComposition*)videoComposition 19 | andAudioMix:(AVAudioMix*)audioMix; 20 | 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageMultiplyBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageMultiplyBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageMultiplyBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | lowp vec4 base = texture2D(inputImageTexture, textureCoordinate); 15 | lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 base = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2); 33 | 34 | gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageMultiplyBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageMultiplyBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageNobleCornerDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageHarrisCornerDetectionFilter.h" 2 | 3 | /** Noble corner detector 4 | 5 | This is the Noble variant on the Harris detector, from 6 | Alison Noble, "Descriptions of Image Surfaces", PhD thesis, Department of Engineering Science, Oxford University 1989, p45. 
7 | */ 8 | 9 | 10 | @interface GPUImageNobleCornerDetectionFilter : GPUImageHarrisCornerDetectionFilter 11 | 12 | @end 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageNonMaximumSuppressionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageNormalBlendFilter.h: -------------------------------------------------------------------------------- 1 | // Created by Jorge Garcia on 9/5/12. 2 | // 3 | 4 | #import "GPUImageTwoInputFilter.h" 5 | 6 | @interface GPUImageNormalBlendFilter : GPUImageTwoInputFilter 7 | 8 | @end 9 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageOpacityFilter : GPUImageFilter 4 | { 5 | GLint opacityUniform; 6 | } 7 | 8 | // Opacity ranges from 0.0 to 1.0, with 1.0 as the normal setting 9 | @property(readwrite, nonatomic) CGFloat opacity; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageOpacityFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageOpacityFilter.h" 2 | 3 | @implementation GPUImageOpacityFilter 4 | 5 | @synthesize opacity = _opacity; 6 | 7 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 8 | NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING 9 | ( 10 | varying highp vec2 textureCoordinate; 11 | 12 | uniform sampler2D inputImageTexture; 13 | uniform lowp float opacity; 14 | 15 | void main() 16 | { 17 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 18 | 19 | gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity); 20 | } 21 | ); 22 | #else 23 | NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING 24 | ( 25 | varying vec2 textureCoordinate; 26 | 27 | uniform sampler2D inputImageTexture; 28 | uniform float opacity; 29 | 30 | void main() 31 | { 32 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 33 | 34 | gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity); 35 | } 36 | ); 37 | #endif 38 | 39 | #pragma mark - 40 | #pragma mark Initialization and teardown 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageOpacityFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | opacityUniform = [filterProgram uniformIndex:@"opacity"]; 50 | self.opacity = 1.0; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setOpacity:(CGFloat)newValue; 59 | { 60 | _opacity = newValue; 61 | 62 | [self setFloat:_opacity forUniform:opacityUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class 
GPUImageErosionFilter; 4 | @class GPUImageDilationFilter; 5 | 6 | // A filter that first performs an erosion on the red channel of an image, followed by a dilation of the same radius. 7 | // This helps to filter out smaller bright elements. 8 | 9 | @interface GPUImageOpeningFilter : GPUImageFilterGroup 10 | { 11 | GPUImageErosionFilter *erosionFilter; 12 | GPUImageDilationFilter *dilationFilter; 13 | } 14 | 15 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; 16 | 17 | - (id)initWithRadius:(NSUInteger)radius; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageOpeningFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageOpeningFilter.h" 2 | #import "GPUImageErosionFilter.h" 3 | #import "GPUImageDilationFilter.h" 4 | 5 | @implementation GPUImageOpeningFilter 6 | 7 | @synthesize verticalTexelSpacing = _verticalTexelSpacing; 8 | @synthesize horizontalTexelSpacing = _horizontalTexelSpacing; 9 | 10 | - (id)init; 11 | { 12 | if (!(self = [self initWithRadius:1])) 13 | { 14 | return nil; 15 | } 16 | 17 | return self; 18 | } 19 | 20 | - (id)initWithRadius:(NSUInteger)radius; 21 | { 22 | if (!(self = [super init])) 23 | { 24 | return nil; 25 | } 26 | 27 | // First pass: erosion 28 | erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius]; 29 | [self addFilter:erosionFilter]; 30 | 31 | // Second pass: dilation 32 | dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius]; 33 | [self addFilter:dilationFilter]; 34 | 35 | [erosionFilter addTarget:dilationFilter]; 36 | 37 | self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil]; 38 | self.terminalFilter = dilationFilter; 39 | 40 | return self; 41 | } 42 | 43 | - (void)setVerticalTexelSpacing:(CGFloat)newValue; 44 | { 45 | _verticalTexelSpacing = newValue; 46 | erosionFilter.verticalTexelSpacing = newValue; 47 | dilationFilter.verticalTexelSpacing = newValue; 48 | } 49 | 50 | - (void)setHorizontalTexelSpacing:(CGFloat)newValue; 51 | { 52 | _horizontalTexelSpacing = newValue; 53 | erosionFilter.horizontalTexelSpacing = newValue; 54 | dilationFilter.horizontalTexelSpacing = newValue; 55 | } 56 | 57 | @end 58 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageOverlayBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageOverlayBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageParallelCoordinateLineTransformFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | // This is an accumulator that uses a Hough transform in parallel coordinate space to identify probable lines in a scene. 4 | // 5 | // It is entirely based on the work of the Graph@FIT research group at the Brno University of Technology and their publications: 6 | // M. Dubská, J. Havel, and A. Herout. Real-Time Detection of Lines using Parallel Coordinates and OpenGL. Proceedings of SCCG 2011, Bratislava, SK, p. 7. 7 | // M. Dubská, J. Havel, and A. Herout. PClines — Line detection using parallel coordinates. 
2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), p. 1489- 1494. 8 | 9 | @interface GPUImageParallelCoordinateLineTransformFilter : GPUImageFilter 10 | { 11 | GLubyte *rawImagePixels; 12 | GLfloat *lineCoordinates; 13 | unsigned int maxLinePairsToRender, linePairsToRender; 14 | } 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePerlinNoiseFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImagePerlinNoiseFilter : GPUImageFilter 4 | { 5 | GLint scaleUniform, colorStartUniform, colorFinishUniform; 6 | } 7 | 8 | @property (readwrite, nonatomic) GPUVector4 colorStart; 9 | @property (readwrite, nonatomic) GPUVector4 colorFinish; 10 | 11 | @property (readwrite, nonatomic) float scale; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePinchDistortionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Creates a pinch distortion of the image 4 | */ 5 | @interface GPUImagePinchDistortionFilter : GPUImageFilter 6 | { 7 | GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform; 8 | } 9 | 10 | /** The center about which to apply the distortion, with a default of (0.5, 0.5) 11 | */ 12 | @property(readwrite, nonatomic) CGPoint center; 13 | /** The radius of the distortion, ranging from 0.0 to 2.0, with a default of 1.0 14 | */ 15 | @property(readwrite, nonatomic) CGFloat radius; 16 | /** The amount of distortion to apply, from -2.0 to 2.0, with a default of 0.5 17 | */ 18 | @property(readwrite, nonatomic) CGFloat scale; 19 | 20 | @end 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePixellateFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImagePixellateFilter : GPUImageFilter 4 | { 5 | GLint fractionalWidthOfAPixelUniform, aspectRatioUniform; 6 | } 7 | 8 | // The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored. 9 | @property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel; 10 | 11 | 12 | @end 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePixellatePositionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImagePixellatePositionFilter : GPUImageFilter 4 | { 5 | GLint fractionalWidthOfAPixelUniform, aspectRatioUniform, centerUniform, radiusUniform; 6 | } 7 | 8 | // The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored. 
9 | @property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel; 10 | 11 | // the center point to start pixelation in texture coordinates, default 0.5, 0.5 12 | @property(readwrite, nonatomic) CGPoint center; 13 | 14 | // the radius (0.0 - 1.0) in which to pixelate, default 1.0 15 | @property(readwrite, nonatomic) CGFloat radius; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePoissonBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputCrossTextureSamplingFilter.h" 2 | #import "GPUImageFilterGroup.h" 3 | 4 | @interface GPUImagePoissonBlendFilter : GPUImageTwoInputCrossTextureSamplingFilter 5 | { 6 | GLint mixUniform; 7 | 8 | GPUImageFramebuffer *secondOutputFramebuffer; 9 | } 10 | 11 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2 gradients), with 1.0 as the normal level 12 | @property(readwrite, nonatomic) CGFloat mix; 13 | 14 | // The number of times to propagate the gradients. 15 | // Crank this up to 100 or even 1000 if you want to get anywhere near convergence. Yes, this will be slow. 16 | @property(readwrite, nonatomic) NSUInteger numIterations; 17 | 18 | @end -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePolarPixellateFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImagePolarPixellateFilter : GPUImageFilter { 4 | GLint centerUniform, pixelSizeUniform; 5 | } 6 | 7 | // The center about which to apply the distortion, with a default of (0.5, 0.5) 8 | @property(readwrite, nonatomic) CGPoint center; 9 | // The amount of distortion to apply, from (-2.0, -2.0) to (2.0, 2.0), with a default of (0.05, 0.05) 10 | @property(readwrite, nonatomic) CGSize pixelSize; 11 | 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePolkaDotFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImagePixellateFilter.h" 2 | 3 | @interface GPUImagePolkaDotFilter : GPUImagePixellateFilter 4 | { 5 | GLint dotScalingUniform; 6 | } 7 | 8 | @property(readwrite, nonatomic) CGFloat dotScaling; 9 | 10 | @end 11 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** This reduces the color dynamic range into the number of steps specified, leading to a cartoon-like simple shading of the image. 4 | */ 5 | @interface GPUImagePosterizeFilter : GPUImageFilter 6 | { 7 | GLint colorLevelsUniform; 8 | } 9 | 10 | /** The number of color levels to reduce the image space to. This ranges from 1 to 256, with a default of 10. 
11 | */ 12 | @property(readwrite, nonatomic) NSUInteger colorLevels; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePosterizeFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImagePosterizeFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform highp float colorLevels; 10 | 11 | void main() 12 | { 13 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 14 | 15 | gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels; 16 | } 17 | ); 18 | #else 19 | NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING 20 | ( 21 | varying vec2 textureCoordinate; 22 | 23 | uniform sampler2D inputImageTexture; 24 | uniform float colorLevels; 25 | 26 | void main() 27 | { 28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 29 | 30 | gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels; 31 | } 32 | ); 33 | #endif 34 | 35 | @implementation GPUImagePosterizeFilter 36 | 37 | @synthesize colorLevels = _colorLevels; 38 | 39 | #pragma mark - 40 | #pragma mark Initialization 41 | 42 | - (id)init; 43 | { 44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImagePosterizeFragmentShaderString])) 45 | { 46 | return nil; 47 | } 48 | 49 | colorLevelsUniform = [filterProgram uniformIndex:@"colorLevels"]; 50 | self.colorLevels = 10; 51 | 52 | return self; 53 | } 54 | 55 | #pragma mark - 56 | #pragma mark Accessors 57 | 58 | - (void)setColorLevels:(NSUInteger)newValue; 59 | { 60 | _colorLevels = newValue; 61 | 62 | [self setFloat:_colorLevels forUniform:colorLevelsUniform program:filterProgram]; 63 | } 64 | 65 | @end 66 | 67 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImagePrewittEdgeDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageSobelEdgeDetectionFilter.h" 2 | 3 | @interface GPUImagePrewittEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageRGBErosionFilter; 4 | @class GPUImageRGBDilationFilter; 5 | 6 | // A filter that first performs a dilation on each color channel of an image, followed by an erosion of the same radius. 7 | // This helps to filter out smaller dark elements. 
8 | 9 | @interface GPUImageRGBClosingFilter : GPUImageFilterGroup 10 | { 11 | GPUImageRGBErosionFilter *erosionFilter; 12 | GPUImageRGBDilationFilter *dilationFilter; 13 | } 14 | 15 | - (id)initWithRadius:(NSUInteger)radius; 16 | 17 | 18 | @end 19 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBClosingFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageRGBClosingFilter.h" 2 | #import "GPUImageRGBErosionFilter.h" 3 | #import "GPUImageRGBDilationFilter.h" 4 | 5 | @implementation GPUImageRGBClosingFilter 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [self initWithRadius:1])) 10 | { 11 | return nil; 12 | } 13 | 14 | return self; 15 | } 16 | 17 | - (id)initWithRadius:(NSUInteger)radius; 18 | { 19 | if (!(self = [super init])) 20 | { 21 | return nil; 22 | } 23 | 24 | // First pass: dilation 25 | dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius]; 26 | [self addFilter:dilationFilter]; 27 | 28 | // Second pass: erosion 29 | erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius]; 30 | [self addFilter:erosionFilter]; 31 | 32 | [dilationFilter addTarget:erosionFilter]; 33 | 34 | self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil]; 35 | self.terminalFilter = erosionFilter; 36 | 37 | return self; 38 | } 39 | 40 | 41 | @end 42 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBDilationFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | // For each pixel, this sets it to the maximum value of each color channel in a rectangular neighborhood extending out dilationRadius pixels from the center. 4 | // This extends out brighter colors, and can be used for abstraction of color images. 5 | 6 | @interface GPUImageRGBDilationFilter : GPUImageTwoPassTextureSamplingFilter 7 | 8 | // Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. 9 | - (id)initWithRadius:(NSUInteger)dilationRadius; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBErosionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassTextureSamplingFilter.h" 2 | 3 | // For each pixel, this sets it to the minimum value of each color channel in a rectangular neighborhood extending out dilationRadius pixels from the center. 4 | // This extends out dark features, and can be used for abstraction of color images. 5 | 6 | @interface GPUImageRGBErosionFilter : GPUImageTwoPassTextureSamplingFilter 7 | 8 | // Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4. 
9 | - (id)initWithRadius:(NSUInteger)erosionRadius; 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageRGBFilter : GPUImageFilter 4 | { 5 | GLint redUniform; 6 | GLint greenUniform; 7 | GLint blueUniform; 8 | } 9 | 10 | // Normalized values by which each color channel is multiplied. The range is from 0.0 up, with 1.0 as the default. 11 | @property (readwrite, nonatomic) CGFloat red; 12 | @property (readwrite, nonatomic) CGFloat green; 13 | @property (readwrite, nonatomic) CGFloat blue; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageRGBErosionFilter; 4 | @class GPUImageRGBDilationFilter; 5 | 6 | // A filter that first performs an erosion on each color channel of an image, followed by a dilation of the same radius. 7 | // This helps to filter out smaller bright elements. 8 | 9 | @interface GPUImageRGBOpeningFilter : GPUImageFilterGroup 10 | { 11 | GPUImageRGBErosionFilter *erosionFilter; 12 | GPUImageRGBDilationFilter *dilationFilter; 13 | } 14 | 15 | - (id)initWithRadius:(NSUInteger)radius; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRGBOpeningFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageRGBOpeningFilter.h" 2 | #import "GPUImageRGBErosionFilter.h" 3 | #import "GPUImageRGBDilationFilter.h" 4 | 5 | @implementation GPUImageRGBOpeningFilter 6 | 7 | - (id)init; 8 | { 9 | if (!(self = [self initWithRadius:1])) 10 | { 11 | return nil; 12 | } 13 | 14 | return self; 15 | } 16 | 17 | - (id)initWithRadius:(NSUInteger)radius; 18 | { 19 | if (!(self = [super init])) 20 | { 21 | return nil; 22 | } 23 | 24 | // First pass: erosion 25 | erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius]; 26 | [self addFilter:erosionFilter]; 27 | 28 | // Second pass: dilation 29 | dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius]; 30 | [self addFilter:dilationFilter]; 31 | 32 | [erosionFilter addTarget:dilationFilter]; 33 | 34 | self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil]; 35 | self.terminalFilter = dilationFilter; 36 | 37 | return self; 38 | } 39 | 40 | 41 | @end 42 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRawDataInput.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageOutput.h" 2 | 3 | // The bytes passed into this input are not copied or retained, but you are free to deallocate them after they are used by this filter. 4 | // The bytes are uploaded and stored within a texture, so nothing is kept locally. 
5 | // The default format for input bytes is GPUPixelFormatBGRA, unless specified with pixelFormat: 6 | // The default type for input bytes is GPUPixelTypeUByte, unless specified with pixelType: 7 | 8 | typedef enum { 9 | GPUPixelFormatBGRA = GL_BGRA, 10 | GPUPixelFormatRGBA = GL_RGBA, 11 | GPUPixelFormatRGB = GL_RGB, 12 | GPUPixelFormatLuminance = GL_LUMINANCE 13 | } GPUPixelFormat; 14 | 15 | typedef enum { 16 | GPUPixelTypeUByte = GL_UNSIGNED_BYTE, 17 | GPUPixelTypeFloat = GL_FLOAT 18 | } GPUPixelType; 19 | 20 | @interface GPUImageRawDataInput : GPUImageOutput 21 | { 22 | CGSize uploadedImageSize; 23 | 24 | dispatch_semaphore_t dataUpdateSemaphore; 25 | } 26 | 27 | // Initialization and teardown 28 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize; 29 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat; 30 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat type:(GPUPixelType)pixelType; 31 | 32 | /** Input data pixel format 33 | */ 34 | @property (readwrite, nonatomic) GPUPixelFormat pixelFormat; 35 | @property (readwrite, nonatomic) GPUPixelType pixelType; 36 | 37 | // Image rendering 38 | - (void)updateDataFromBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize; 39 | - (void)processData; 40 | - (void)processDataForTimestamp:(CMTime)frameTime; 41 | - (CGSize)outputImageSize; 42 | 43 | @end 44 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageRawDataOutput.h: -------------------------------------------------------------------------------- 1 | #import 2 | #import "GPUImageContext.h" 3 | 4 | struct GPUByteColorVector { 5 | GLubyte red; 6 | GLubyte green; 7 | GLubyte blue; 8 | GLubyte alpha; 9 | }; 10 | typedef struct GPUByteColorVector GPUByteColorVector; 11 | 12 | @protocol GPUImageRawDataProcessor; 13 | 14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 15 | @interface GPUImageRawDataOutput : NSObject { 16 | CGSize imageSize; 17 | GPUImageRotationMode inputRotation; 18 | BOOL outputBGRA; 19 | } 20 | #else 21 | @interface GPUImageRawDataOutput : NSObject { 22 | CGSize imageSize; 23 | GPUImageRotationMode inputRotation; 24 | BOOL outputBGRA; 25 | } 26 | #endif 27 | 28 | @property(readonly) GLubyte *rawBytesForImage; 29 | @property(nonatomic, copy) void(^newFrameAvailableBlock)(void); 30 | @property(nonatomic) BOOL enabled; 31 | 32 | // Initialization and teardown 33 | - (id)initWithImageSize:(CGSize)newImageSize resultsInBGRAFormat:(BOOL)resultsInBGRAFormat; 34 | 35 | // Data access 36 | - (GPUByteColorVector)colorAtLocation:(CGPoint)locationInImage; 37 | - (NSUInteger)bytesPerRowInOutput; 38 | 39 | - (void)setImageSize:(CGSize)newImageSize; 40 | 41 | - (void)lockFramebufferForReading; 42 | - (void)unlockFramebufferAfterReading; 43 | 44 | @end 45 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSaturationBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageSaturationBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSaturationFilter.h: -------------------------------------------------------------------------------- 1 | #import 
"GPUImageFilter.h" 2 | 3 | /** Adjusts the saturation of an image 4 | */ 5 | @interface GPUImageSaturationFilter : GPUImageFilter 6 | { 7 | GLint saturationUniform; 8 | } 9 | 10 | /** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 1.0 as the normal level 11 | */ 12 | @property(readwrite, nonatomic) CGFloat saturation; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageScreenBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageScreenBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageScreenBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | mediump vec4 whiteColor = vec4(1.0); 17 | gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor)); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 33 | vec4 whiteColor = vec4(1.0); 34 | gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor)); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageScreenBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageScreenBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageColorMatrixFilter.h" 2 | 3 | /// Simple sepia tone filter 4 | @interface GPUImageSepiaFilter : GPUImageColorMatrixFilter 5 | 6 | @end 7 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSepiaFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageSepiaFilter.h" 2 | 3 | @implementation GPUImageSepiaFilter 4 | 5 | - (id)init; 6 | { 7 | if (!(self = [super init])) 8 | { 9 | return nil; 10 | } 11 | 12 | self.intensity = 1.0; 13 | self.colorMatrix = (GPUMatrix4x4){ 14 | {0.3588, 0.7044, 0.1368, 0.0}, 15 | {0.2990, 0.5870, 0.1140, 0.0}, 16 | {0.2392, 0.4696, 0.0912 ,0.0}, 17 | {0,0,0,1.0}, 18 | }; 19 | 20 | return 
self; 21 | } 22 | 23 | @end 24 | 25 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSharpenFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageSharpenFilter : GPUImageFilter 4 | { 5 | GLint sharpnessUniform; 6 | GLint imageWidthFactorUniform, imageHeightFactorUniform; 7 | } 8 | 9 | // Sharpness ranges from -4.0 to 4.0, with 0.0 as the normal level 10 | @property(readwrite, nonatomic) CGFloat sharpness; 11 | 12 | @end 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageShiTomasiFeatureDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageHarrisCornerDetectionFilter.h" 2 | 3 | /** Shi-Tomasi feature detector 4 | 5 | This is the Shi-Tomasi feature detector, as described in 6 | J. Shi and C. Tomasi. Good features to track. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 593-600, June 1994. 7 | */ 8 | 9 | @interface GPUImageShiTomasiFeatureDetectionFilter : GPUImageHarrisCornerDetectionFilter 10 | 11 | // Compared to the Harris corner detector, the default sensitivity value for this detector is set to 1.5 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSingleComponentGaussianBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageGaussianBlurFilter.h" 2 | 3 | // This filter merely performs the standard Gaussian blur on the red color channel (assuming a luminance image) 4 | 5 | @interface GPUImageSingleComponentGaussianBlurFilter : GPUImageGaussianBlurFilter 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSketchFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageSobelEdgeDetectionFilter.h" 2 | 3 | /** Converts video to look like a sketch. 4 | 5 | This is just the Sobel edge detection filter with the colors inverted. 6 | */ 7 | @interface GPUImageSketchFilter : GPUImageSobelEdgeDetectionFilter 8 | { 9 | } 10 | 11 | @end 12 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSkinToneFilter.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImageSkinToneFilter.h 3 | // 4 | // 5 | // Created by github.com/r3mus on 8/14/15. 6 | // 7 | // 8 | 9 | #import "GPUImageTwoInputFilter.h" 10 | 11 | typedef NS_ENUM(NSUInteger, GPUImageSkinToneUpperColor) { 12 | GPUImageSkinToneUpperColorGreen, 13 | GPUImageSkinToneUpperColorOrange 14 | }; 15 | 16 | extern NSString *const kGPUImageSkinToneFragmentShaderString; 17 | 18 | @interface GPUImageSkinToneFilter : GPUImageFilter 19 | { 20 | GLint skinToneAdjustUniform; 21 | GLint skinHueUniform; 22 | GLint skinHueThresholdUniform; 23 | GLint maxHueShiftUniform; 24 | GLint maxSaturationShiftUniform; 25 | GLint upperSkinToneColorUniform; 26 | } 27 | 28 | // The amount of effect to apply, between -1.0 (pink) and +1.0 (orange OR green). Default is 0.0. 
29 | @property (nonatomic, readwrite) CGFloat skinToneAdjust; 30 | 31 | // The initial hue of skin to adjust. Default is 0.05 (a common skin red). 32 | @property (nonatomic, readwrite) CGFloat skinHue; 33 | 34 | // The bell curve "breadth" of the skin hue adjustment (i.e. how different from the original skinHue will the modifications effect). 35 | // Default is 40.0 36 | @property (nonatomic, readwrite) CGFloat skinHueThreshold; 37 | 38 | // The maximum amount of hue shift allowed in the adjustments that affect hue (pink, green). Default = 0.25. 39 | @property (nonatomic, readwrite) CGFloat maxHueShift; 40 | 41 | // The maximum amount of saturation shift allowed in the adjustments that affect saturation (orange). Default = 0.4. 42 | @property (nonatomic, readwrite) CGFloat maxSaturationShift; 43 | 44 | // Defines whether the upper range (> 0.0) will change the skin tone to green (hue) or orange (saturation) 45 | @property (nonatomic, readwrite) GPUImageSkinToneUpperColor upperSkinToneColor; 46 | 47 | @end 48 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSmoothToonFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageGaussianBlurFilter; 4 | @class GPUImageToonFilter; 5 | 6 | /** This uses a similar process as the GPUImageToonFilter, only it precedes the toon effect with a Gaussian blur to smooth out noise. 7 | */ 8 | @interface GPUImageSmoothToonFilter : GPUImageFilterGroup 9 | { 10 | GPUImageGaussianBlurFilter *blurFilter; 11 | GPUImageToonFilter *toonFilter; 12 | } 13 | 14 | /// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels 15 | @property(readwrite, nonatomic) CGFloat texelWidth; 16 | /// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels 17 | @property(readwrite, nonatomic) CGFloat texelHeight; 18 | 19 | /// The radius of the underlying Gaussian blur. The default is 2.0. 20 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 21 | 22 | /// The threshold at which to apply the edges, default of 0.2 23 | @property(readwrite, nonatomic) CGFloat threshold; 24 | 25 | /// The levels of quantization for the posterization of colors within the scene, with a default of 10.0 26 | @property(readwrite, nonatomic) CGFloat quantizationLevels; 27 | 28 | @end 29 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSobelEdgeDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassFilter.h" 2 | 3 | @interface GPUImageSobelEdgeDetectionFilter : GPUImageTwoPassFilter 4 | { 5 | GLint texelWidthUniform, texelHeightUniform, edgeStrengthUniform; 6 | BOOL hasOverriddenImageSizeFactor; 7 | } 8 | 9 | // The texel width and height factors tweak the appearance of the edges. By default, they match the inverse of the filter size in pixels 10 | @property(readwrite, nonatomic) CGFloat texelWidth; 11 | @property(readwrite, nonatomic) CGFloat texelHeight; 12 | 13 | // The filter strength property affects the dynamic range of the filter. High values can make edges more visible, but can lead to saturation. Default of 1.0. 
14 | @property(readwrite, nonatomic) CGFloat edgeStrength; 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSoftEleganceFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImagePicture; 4 | 5 | /** A photo filter based on Soft Elegance Photoshop action 6 | http://h-d-stock.deviantart.com/art/H-D-A-soft-elegance-70107603 7 | */ 8 | 9 | // Note: If you want to use this effect you have to add 10 | // lookup_soft_elegance_1.png and lookup_soft_elegance_2.png 11 | // from Resources folder to your application bundle. 12 | 13 | @interface GPUImageSoftEleganceFilter : GPUImageFilterGroup 14 | { 15 | GPUImagePicture *lookupImageSource1; 16 | GPUImagePicture *lookupImageSource2; 17 | } 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageSoftLightBlendFilter : GPUImageTwoInputFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSoftLightBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageSoftLightBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageSoftLightBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | mediump vec4 base = texture2D(inputImageTexture, textureCoordinate); 15 | mediump vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | lowp float alphaDivisor = base.a + step(base.a, 0.0); // Protect against a divide-by-zero blacking out things in the output 18 | gl_FragColor = base * (overlay.a * (base / alphaDivisor) + (2.0 * overlay * (1.0 - (base / alphaDivisor)))) + overlay * (1.0 - base.a) + base * (1.0 - overlay.a); 19 | } 20 | ); 21 | #else 22 | NSString *const kGPUImageSoftLightBlendFragmentShaderString = SHADER_STRING 23 | ( 24 | varying vec2 textureCoordinate; 25 | varying vec2 textureCoordinate2; 26 | 27 | uniform sampler2D inputImageTexture; 28 | uniform sampler2D inputImageTexture2; 29 | 30 | void main() 31 | { 32 | vec4 base = texture2D(inputImageTexture, textureCoordinate); 33 | vec4 overlay = texture2D(inputImageTexture2, textureCoordinate2); 34 | 35 | float alphaDivisor = base.a + step(base.a, 0.0); // Protect against a divide-by-zero blacking out things in the output 36 | gl_FragColor = base * (overlay.a * (base / alphaDivisor) + (2.0 * overlay * (1.0 - (base / alphaDivisor)))) + overlay * (1.0 - base.a) + base * (1.0 - overlay.a); 37 | } 38 | ); 39 | #endif 40 | 41 | @implementation GPUImageSoftLightBlendFilter 42 | 43 | - (id)init; 44 | { 45 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSoftLightBlendFragmentShaderString])) 46 | { 47 | return nil; 48 | } 49 | 50 | return self; 51 | } 52 | 53 | @end 54 | 55 | -------------------------------------------------------------------------------- 
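A brief sketch of driving the GPUImageSobelEdgeDetectionFilter declared above on a still image; edgeStrength and the texel factors come from that header, while the input UIImage and the capture calls are assumptions taken from the wider GPUImage API rather than from this dump:

    GPUImagePicture *stillSource = [[GPUImagePicture alloc] initWithImage:inputImage]; // inputImage: assumed UIImage
    GPUImageSobelEdgeDetectionFilter *edgeFilter = [[GPUImageSobelEdgeDetectionFilter alloc] init];
    edgeFilter.edgeStrength = 1.5;   // values above 1.0 make edges more visible, at some risk of saturation

    [stillSource addTarget:edgeFilter];
    [edgeFilter useNextFrameForImageCapture];   // assumed GPUImageOutput capture API
    [stillSource processImage];
    UIImage *edgeImage = [edgeFilter imageFromCurrentFramebuffer];

Leaving texelWidth/texelHeight untouched keeps them at the inverse of the input size, per the header comment above; overriding them exaggerates or softens the detected edges.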
/ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Pixels with a luminance above the threshold will invert their color 4 | */ 5 | @interface GPUImageSolarizeFilter : GPUImageFilter 6 | { 7 | GLint thresholdUniform; 8 | } 9 | 10 | /** Anything above this luminance will be inverted, and anything below normal. Ranges from 0.0 to 1.0, with 0.5 as the default 11 | */ 12 | @property(readwrite, nonatomic) CGFloat threshold; 13 | 14 | @end -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSolarizeFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageSolarizeFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageSolarizeFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | uniform highp float threshold; 10 | 11 | const highp vec3 W = vec3(0.2125, 0.7154, 0.0721); 12 | 13 | void main() 14 | { 15 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 16 | highp float luminance = dot(textureColor.rgb, W); 17 | highp float thresholdResult = step(luminance, threshold); 18 | highp vec3 finalColor = abs(thresholdResult - textureColor.rgb); 19 | 20 | gl_FragColor = vec4(finalColor, textureColor.w); 21 | } 22 | ); 23 | #else 24 | NSString *const kGPUImageSolarizeFragmentShaderString = SHADER_STRING 25 | ( 26 | varying vec2 textureCoordinate; 27 | 28 | uniform sampler2D inputImageTexture; 29 | uniform float threshold; 30 | 31 | const vec3 W = vec3(0.2125, 0.7154, 0.0721); 32 | 33 | void main() 34 | { 35 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 36 | float luminance = dot(textureColor.rgb, W); 37 | float thresholdResult = step(luminance, threshold); 38 | vec3 finalColor = abs(thresholdResult - textureColor.rgb); 39 | 40 | gl_FragColor = vec4(vec3(finalColor), textureColor.w); 41 | } 42 | ); 43 | #endif 44 | 45 | @implementation GPUImageSolarizeFilter; 46 | 47 | @synthesize threshold = _threshold; 48 | 49 | #pragma mark - 50 | #pragma mark Initialization 51 | 52 | - (id)init; 53 | { 54 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSolarizeFragmentShaderString])) 55 | { 56 | return nil; 57 | } 58 | 59 | thresholdUniform = [filterProgram uniformIndex:@"threshold"]; 60 | self.threshold = 0.5; 61 | 62 | return self; 63 | } 64 | 65 | #pragma mark - 66 | #pragma mark Accessors 67 | 68 | - (void)setThreshold:(CGFloat)newValue; 69 | { 70 | _threshold = newValue; 71 | 72 | [self setFloat:_threshold forUniform:thresholdUniform program:filterProgram]; 73 | } 74 | 75 | 76 | @end -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSolidColorGenerator.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | // This outputs an image with a constant color. 
You need to use -forceProcessingAtSize: in order to set the output image 4 | // dimensions, or this won't work correctly 5 | 6 | 7 | @interface GPUImageSolidColorGenerator : GPUImageFilter 8 | { 9 | GLint colorUniform; 10 | GLint useExistingAlphaUniform; 11 | } 12 | 13 | // This color dictates what the output image will be filled with 14 | @property(readwrite, nonatomic) GPUVector4 color; 15 | @property(readwrite, nonatomic, assign) BOOL useExistingAlpha; // whether to use the alpha of the existing image or not, default is NO 16 | 17 | - (void)setColorRed:(CGFloat)redComponent green:(CGFloat)greenComponent blue:(CGFloat)blueComponent alpha:(CGFloat)alphaComponent; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageSourceOverBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSourceOverBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageSourceOverBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate); 16 | 17 | gl_FragColor = mix(textureColor, textureColor2, textureColor2.a); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate); 33 | 34 | gl_FragColor = mix(textureColor, textureColor2, textureColor2.a); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageSourceOverBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSourceOverBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSphereRefractionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageSphereRefractionFilter : GPUImageFilter 4 | { 5 | GLint radiusUniform, centerUniform, aspectRatioUniform, refractiveIndexUniform; 6 | } 7 | 8 | /// The center about which to apply the distortion, with a default of (0.5, 0.5) 9 | @property(readwrite, nonatomic) CGPoint center; 10 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25 11 | @property(readwrite, nonatomic) CGFloat radius; 12 | /// The index of refraction for the sphere, with a default of 0.71 13 | 
@property(readwrite, nonatomic) CGFloat refractiveIndex; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageStillCamera.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageVideoCamera.h" 2 | 3 | void stillImageDataReleaseCallback(void *releaseRefCon, const void *baseAddress); 4 | void GPUImageCreateResizedSampleBuffer(CVPixelBufferRef cameraFrame, CGSize finalSize, CMSampleBufferRef *sampleBuffer); 5 | 6 | @interface GPUImageStillCamera : GPUImageVideoCamera 7 | 8 | /** The JPEG compression quality to use when capturing a photo as a JPEG. 9 | */ 10 | @property CGFloat jpegCompressionQuality; 11 | 12 | // Only reliably set inside the context of the completion handler of one of the capture methods 13 | @property (readonly) NSDictionary *currentCaptureMetadata; 14 | 15 | // Photography controls 16 | - (void)capturePhotoAsSampleBufferWithCompletionHandler:(void (^)(CMSampleBufferRef imageSampleBuffer, NSError *error))block; 17 | - (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block; 18 | - (void)capturePhotoAsImageProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(UIImage *processedImage, NSError *error))block; 19 | - (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedJPEG, NSError *error))block; 20 | - (void)capturePhotoAsJPEGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedJPEG, NSError *error))block; 21 | - (void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; 22 | - (void)capturePhotoAsPNGProcessedUpToFilter:(GPUImageOutput *)finalFilterInChain withOrientation:(UIImageOrientation)orientation withCompletionHandler:(void (^)(NSData *processedPNG, NSError *error))block; 23 | 24 | @end 25 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageStretchDistortionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Creates a stretch distortion of the image 4 | */ 5 | @interface GPUImageStretchDistortionFilter : GPUImageFilter { 6 | GLint centerUniform; 7 | } 8 | 9 | /** The center about which to apply the distortion, with a default of (0.5, 0.5) 10 | */ 11 | @property(readwrite, nonatomic) CGPoint center; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageSubtractBlendFilter : GPUImageTwoInputFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSubtractBlendFilter.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageSubtractBlendFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 
| NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | varying highp vec2 textureCoordinate2; 8 | 9 | uniform sampler2D inputImageTexture; 10 | uniform sampler2D inputImageTexture2; 11 | 12 | void main() 13 | { 14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 16 | 17 | gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a); 18 | } 19 | ); 20 | #else 21 | NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING 22 | ( 23 | varying vec2 textureCoordinate; 24 | varying vec2 textureCoordinate2; 25 | 26 | uniform sampler2D inputImageTexture; 27 | uniform sampler2D inputImageTexture2; 28 | 29 | void main() 30 | { 31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2); 33 | 34 | gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a); 35 | } 36 | ); 37 | #endif 38 | 39 | @implementation GPUImageSubtractBlendFilter 40 | 41 | - (id)init; 42 | { 43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSubtractBlendFragmentShaderString])) 44 | { 45 | return nil; 46 | } 47 | 48 | return self; 49 | } 50 | 51 | @end 52 | 53 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageSwirlFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Creates a swirl distortion on the image 4 | */ 5 | @interface GPUImageSwirlFilter : GPUImageFilter 6 | { 7 | GLint radiusUniform, centerUniform, angleUniform; 8 | } 9 | 10 | /// The center about which to apply the distortion, with a default of (0.5, 0.5) 11 | @property(readwrite, nonatomic) CGPoint center; 12 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.5 13 | @property(readwrite, nonatomic) CGFloat radius; 14 | /// The amount of distortion to apply, with a minimum of 0.0 and a default of 1.0 15 | @property(readwrite, nonatomic) CGFloat angle; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageOutput.h" 2 | 3 | @interface GPUImageTextureInput : GPUImageOutput 4 | { 5 | CGSize textureSize; 6 | } 7 | 8 | // Initialization and teardown 9 | - (id)initWithTexture:(GLuint)newInputTexture size:(CGSize)newTextureSize; 10 | 11 | // Image rendering 12 | - (void)processTextureWithFrameTime:(CMTime)frameTime; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTextureInput.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageTextureInput.h" 2 | 3 | @implementation GPUImageTextureInput 4 | 5 | #pragma mark - 6 | #pragma mark Initialization and teardown 7 | 8 | - (id)initWithTexture:(GLuint)newInputTexture size:(CGSize)newTextureSize; 9 | { 10 | if (!(self = [super init])) 11 | { 12 | return nil; 13 | } 14 | 15 | runSynchronouslyOnVideoProcessingQueue(^{ 16 | [GPUImageContext useImageProcessingContext]; 17 | }); 18 | 19 | textureSize = newTextureSize; 20 | 21 | 
runSynchronouslyOnVideoProcessingQueue(^{ 22 | outputFramebuffer = [[GPUImageFramebuffer alloc] initWithSize:newTextureSize overriddenTexture:newInputTexture]; 23 | }); 24 | 25 | return self; 26 | } 27 | 28 | #pragma mark - 29 | #pragma mark Image rendering 30 | 31 | - (void)processTextureWithFrameTime:(CMTime)frameTime; 32 | { 33 | runAsynchronouslyOnVideoProcessingQueue(^{ 34 | for (id currentTarget in targets) 35 | { 36 | NSInteger indexOfObject = [targets indexOfObject:currentTarget]; 37 | NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue]; 38 | 39 | [currentTarget setInputSize:textureSize atIndex:targetTextureIndex]; 40 | [currentTarget setInputFramebuffer:outputFramebuffer atIndex:targetTextureIndex]; 41 | [currentTarget newFrameReadyAtTime:frameTime atIndex:targetTextureIndex]; 42 | } 43 | }); 44 | } 45 | 46 | @end 47 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.h: -------------------------------------------------------------------------------- 1 | #import 2 | #import "GPUImageContext.h" 3 | 4 | @protocol GPUImageTextureOutputDelegate; 5 | 6 | @interface GPUImageTextureOutput : NSObject 7 | { 8 | GPUImageFramebuffer *firstInputFramebuffer; 9 | } 10 | 11 | @property(readwrite, unsafe_unretained, nonatomic) id delegate; 12 | @property(readonly) GLuint texture; 13 | @property(nonatomic) BOOL enabled; 14 | 15 | - (void)doneWithTexture; 16 | 17 | @end 18 | 19 | @protocol GPUImageTextureOutputDelegate 20 | - (void)newFrameReadyFromTextureOutput:(GPUImageTextureOutput *)callbackTextureOutput; 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTextureOutput.m: -------------------------------------------------------------------------------- 1 | #import "GPUImageTextureOutput.h" 2 | 3 | @implementation GPUImageTextureOutput 4 | 5 | @synthesize delegate = _delegate; 6 | @synthesize texture = _texture; 7 | @synthesize enabled; 8 | 9 | #pragma mark - 10 | #pragma mark Initialization and teardown 11 | 12 | - (id)init; 13 | { 14 | if (!(self = [super init])) 15 | { 16 | return nil; 17 | } 18 | 19 | self.enabled = YES; 20 | 21 | return self; 22 | } 23 | 24 | - (void)doneWithTexture; 25 | { 26 | [firstInputFramebuffer unlock]; 27 | } 28 | 29 | #pragma mark - 30 | #pragma mark GPUImageInput protocol 31 | 32 | - (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex; 33 | { 34 | [_delegate newFrameReadyFromTextureOutput:self]; 35 | } 36 | 37 | - (NSInteger)nextAvailableTextureIndex; 38 | { 39 | return 0; 40 | } 41 | 42 | // TODO: Deal with the fact that the texture changes regularly as a result of the caching 43 | - (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex; 44 | { 45 | firstInputFramebuffer = newInputFramebuffer; 46 | [firstInputFramebuffer lock]; 47 | 48 | _texture = [firstInputFramebuffer texture]; 49 | } 50 | 51 | - (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex; 52 | { 53 | } 54 | 55 | - (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex; 56 | { 57 | } 58 | 59 | - (CGSize)maximumOutputSize; 60 | { 61 | return CGSizeZero; 62 | } 63 | 64 | - (void)endProcessing 65 | { 66 | } 67 | 68 | - (BOOL)shouldIgnoreUpdatesToThisTarget; 69 | { 70 | return NO; 71 | } 72 | 73 | - (BOOL)wantsMonochromeInput; 74 | { 75 | return NO; 
76 | } 77 | 78 | - (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; 79 | { 80 | 81 | } 82 | 83 | @end 84 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageThreeInputFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | extern NSString *const kGPUImageThreeInputTextureVertexShaderString; 4 | 5 | @interface GPUImageThreeInputFilter : GPUImageTwoInputFilter 6 | { 7 | GPUImageFramebuffer *thirdInputFramebuffer; 8 | 9 | GLint filterThirdTextureCoordinateAttribute; 10 | GLint filterInputTextureUniform3; 11 | GPUImageRotationMode inputRotation3; 12 | GLuint filterSourceTexture3; 13 | CMTime thirdFrameTime; 14 | 15 | BOOL hasSetSecondTexture, hasReceivedThirdFrame, thirdFrameWasVideo; 16 | BOOL thirdFrameCheckDisabled; 17 | } 18 | 19 | - (void)disableThirdFrameCheck; 20 | 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageThresholdEdgeDetectionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageSobelEdgeDetectionFilter.h" 2 | 3 | @interface GPUImageThresholdEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter 4 | { 5 | GLint thresholdUniform; 6 | } 7 | 8 | /** Any edge above this threshold will be black, and anything below white. Ranges from 0.0 to 1.0, with 0.8 as the default 9 | */ 10 | @property(readwrite, nonatomic) CGFloat threshold; 11 | 12 | @end 13 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageThresholdSketchFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageThresholdEdgeDetectionFilter.h" 2 | 3 | @interface GPUImageThresholdSketchFilter : GPUImageThresholdEdgeDetectionFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageThresholdedNonMaximumSuppressionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageThresholdedNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter 4 | { 5 | GLint thresholdUniform; 6 | } 7 | 8 | /** Any local maximum above this threshold will be white, and anything below black. Ranges from 0.0 to 1.0, with 0.8 as the default 9 | */ 10 | @property(readwrite, nonatomic) CGFloat threshold; 11 | 12 | - (id)initWithPackedColorspace:(BOOL)inputUsesPackedColorspace; 13 | 14 | @end 15 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTiltShiftFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageGaussianBlurFilter; 4 | 5 | /// A simulated tilt shift lens effect 6 | @interface GPUImageTiltShiftFilter : GPUImageFilterGroup 7 | { 8 | GPUImageGaussianBlurFilter *blurFilter; 9 | GPUImageFilter *tiltShiftFilter; 10 | } 11 | 12 | /// The radius of the underlying blur, in pixels. This is 7.0 by default. 
13 | @property(readwrite, nonatomic) CGFloat blurRadiusInPixels; 14 | 15 | /// The normalized location of the top of the in-focus area in the image, this value should be lower than bottomFocusLevel, default 0.4 16 | @property(readwrite, nonatomic) CGFloat topFocusLevel; 17 | 18 | /// The normalized location of the bottom of the in-focus area in the image, this value should be higher than topFocusLevel, default 0.6 19 | @property(readwrite, nonatomic) CGFloat bottomFocusLevel; 20 | 21 | /// The rate at which the image gets blurry away from the in-focus region, default 0.2 22 | @property(readwrite, nonatomic) CGFloat focusFallOffRate; 23 | 24 | @end 25 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageToneCurveFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageToneCurveFilter : GPUImageFilter 4 | 5 | @property(readwrite, nonatomic, copy) NSArray *redControlPoints; 6 | @property(readwrite, nonatomic, copy) NSArray *greenControlPoints; 7 | @property(readwrite, nonatomic, copy) NSArray *blueControlPoints; 8 | @property(readwrite, nonatomic, copy) NSArray *rgbCompositeControlPoints; 9 | 10 | // Initialization and teardown 11 | - (id)initWithACVData:(NSData*)data; 12 | 13 | - (id)initWithACV:(NSString*)curveFilename; 14 | - (id)initWithACVURL:(NSURL*)curveFileURL; 15 | 16 | // This lets you set all three red, green, and blue tone curves at once. 17 | // NOTE: Deprecated this function because this effect can be accomplished 18 | // using the rgbComposite channel rather then setting all 3 R, G, and B channels. 19 | - (void)setRGBControlPoints:(NSArray *)points DEPRECATED_ATTRIBUTE; 20 | 21 | - (void)setPointsWithACV:(NSString*)curveFilename; 22 | - (void)setPointsWithACVURL:(NSURL*)curveFileURL; 23 | 24 | // Curve calculation 25 | - (NSMutableArray *)getPreparedSplineCurve:(NSArray *)points; 26 | - (NSMutableArray *)splineCurve:(NSArray *)points; 27 | - (NSMutableArray *)secondDerivative:(NSArray *)cgPoints; 28 | - (void)updateToneCurveTexture; 29 | 30 | @end 31 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageToonFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | /** This uses Sobel edge detection to place a black border around objects, 4 | and then it quantizes the colors present in the image to give a cartoon-like quality to the image. 
5 | */ 6 | @interface GPUImageToonFilter : GPUImage3x3TextureSamplingFilter 7 | { 8 | GLint thresholdUniform, quantizationLevelsUniform; 9 | } 10 | 11 | /** The threshold at which to apply the edges, default of 0.2 12 | */ 13 | @property(readwrite, nonatomic) CGFloat threshold; 14 | 15 | /** The levels of quantization for the posterization of colors within the scene, with a default of 10.0 16 | */ 17 | @property(readwrite, nonatomic) CGFloat quantizationLevels; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTransformFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageTransformFilter : GPUImageFilter 4 | { 5 | GLint transformMatrixUniform, orthographicMatrixUniform; 6 | GPUMatrix4x4 orthographicMatrix; 7 | } 8 | 9 | // You can either set the transform to apply to be a 2-D affine transform or a 3-D transform. The default is the identity transform (the output image is identical to the input). 10 | @property(readwrite, nonatomic) CGAffineTransform affineTransform; 11 | @property(readwrite, nonatomic) CATransform3D transform3D; 12 | 13 | // This applies the transform to the raw frame data if set to YES, the default of NO takes the aspect ratio of the image input into account when rotating 14 | @property(readwrite, nonatomic) BOOL ignoreAspectRatio; 15 | 16 | // sets the anchor point to top left corner 17 | @property(readwrite, nonatomic) BOOL anchorTopLeft; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputCrossTextureSamplingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageTwoInputCrossTextureSamplingFilter : GPUImageTwoInputFilter 4 | { 5 | GLint texelWidthUniform, texelHeightUniform; 6 | 7 | CGFloat texelWidth, texelHeight; 8 | BOOL hasOverriddenImageSizeFactor; 9 | } 10 | 11 | // The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects. 
12 | @property(readwrite, nonatomic) CGFloat texelWidth; 13 | @property(readwrite, nonatomic) CGFloat texelHeight; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTwoInputFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | extern NSString *const kGPUImageTwoInputTextureVertexShaderString; 4 | 5 | @interface GPUImageTwoInputFilter : GPUImageFilter 6 | { 7 | GPUImageFramebuffer *secondInputFramebuffer; 8 | 9 | GLint filterSecondTextureCoordinateAttribute; 10 | GLint filterInputTextureUniform2; 11 | GPUImageRotationMode inputRotation2; 12 | CMTime firstFrameTime, secondFrameTime; 13 | 14 | BOOL hasSetFirstTexture, hasReceivedFirstFrame, hasReceivedSecondFrame, firstFrameWasVideo, secondFrameWasVideo; 15 | BOOL firstFrameCheckDisabled, secondFrameCheckDisabled; 16 | } 17 | 18 | - (void)disableFirstFrameCheck; 19 | - (void)disableSecondFrameCheck; 20 | 21 | @end 22 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageTwoPassFilter : GPUImageFilter 4 | { 5 | GPUImageFramebuffer *secondOutputFramebuffer; 6 | 7 | GLProgram *secondFilterProgram; 8 | GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute; 9 | GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2; 10 | 11 | NSMutableDictionary *secondProgramUniformStateRestorationBlocks; 12 | } 13 | 14 | // Initialization and teardown 15 | - (id)initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; 16 | - (id)initWithFirstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString; 17 | - (void)initializeSecondaryAttributes; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageTwoPassTextureSamplingFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoPassFilter.h" 2 | 3 | @interface GPUImageTwoPassTextureSamplingFilter : GPUImageTwoPassFilter 4 | { 5 | GLint verticalPassTexelWidthOffsetUniform, verticalPassTexelHeightOffsetUniform, horizontalPassTexelWidthOffsetUniform, horizontalPassTexelHeightOffsetUniform; 6 | GLfloat verticalPassTexelWidthOffset, verticalPassTexelHeightOffset, horizontalPassTexelWidthOffset, horizontalPassTexelHeightOffset; 7 | CGFloat _verticalTexelSpacing, _horizontalTexelSpacing; 8 | } 9 | 10 | // This sets the spacing between texels (in pixels) when sampling for the first. 
By default, this is 1.0 11 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageUIElement.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageOutput.h" 2 | 3 | @interface GPUImageUIElement : GPUImageOutput 4 | 5 | // Initialization and teardown 6 | - (id)initWithView:(UIView *)inputView; 7 | - (id)initWithLayer:(CALayer *)inputLayer; 8 | 9 | // Layer management 10 | - (CGSize)layerSizeInPixels; 11 | - (void)update; 12 | - (void)updateUsingCurrentTime; 13 | - (void)updateWithTimestamp:(CMTime)frameTime; 14 | 15 | @end 16 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageUnsharpMaskFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageGaussianBlurFilter; 4 | 5 | @interface GPUImageUnsharpMaskFilter : GPUImageFilterGroup 6 | { 7 | GPUImageGaussianBlurFilter *blurFilter; 8 | GPUImageFilter *unsharpMaskFilter; 9 | } 10 | // The blur radius of the underlying Gaussian blur. The default is 4.0. 11 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 12 | 13 | // The strength of the sharpening, from 0.0 on up, with a default of 1.0 14 | @property(readwrite, nonatomic) CGFloat intensity; 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageVignetteFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | /** Performs a vignetting effect, fading out the image at the edges 4 | */ 5 | @interface GPUImageVignetteFilter : GPUImageFilter 6 | { 7 | GLint vignetteCenterUniform, vignetteColorUniform, vignetteStartUniform, vignetteEndUniform; 8 | } 9 | 10 | // the center for the vignette in tex coords (defaults to 0.5, 0.5) 11 | @property (nonatomic, readwrite) CGPoint vignetteCenter; 12 | 13 | // The color to use for the Vignette (defaults to black) 14 | @property (nonatomic, readwrite) GPUVector3 vignetteColor; 15 | 16 | // The normalized distance from the center where the vignette effect starts. Default of 0.5. 17 | @property (nonatomic, readwrite) CGFloat vignetteStart; 18 | 19 | // The normalized distance from the center where the vignette effect ends. Default of 0.75. 
20 | @property (nonatomic, readwrite) CGFloat vignetteEnd; 21 | 22 | @end 23 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageVoronoiConsumerFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageTwoInputFilter.h" 2 | 3 | @interface GPUImageVoronoiConsumerFilter : GPUImageTwoInputFilter 4 | { 5 | GLint sizeUniform; 6 | } 7 | 8 | @property (nonatomic, readwrite) CGSize sizeInPixels; 9 | 10 | @end 11 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageWeakPixelInclusionFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImage3x3TextureSamplingFilter.h" 2 | 3 | @interface GPUImageWeakPixelInclusionFilter : GPUImage3x3TextureSamplingFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageWhiteBalanceFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | /** 3 | * Created by Alaric Cole 4 | * Allows adjustment of color temperature in terms of what an image was effectively shot in. This means higher Kelvin values will warm the image, while lower values will cool it. 5 | 6 | */ 7 | @interface GPUImageWhiteBalanceFilter : GPUImageFilter 8 | { 9 | GLint temperatureUniform, tintUniform; 10 | } 11 | //choose color temperature, in degrees Kelvin 12 | @property(readwrite, nonatomic) CGFloat temperature; 13 | 14 | //adjust tint to compensate 15 | @property(readwrite, nonatomic) CGFloat tint; 16 | 17 | @end 18 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageXYDerivativeFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageSobelEdgeDetectionFilter.h" 2 | 3 | @interface GPUImageXYDerivativeFilter : GPUImageSobelEdgeDetectionFilter 4 | 5 | @end 6 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageZoomBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface GPUImageZoomBlurFilter : GPUImageFilter 4 | 5 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0 6 | */ 7 | @property (readwrite, nonatomic) CGFloat blurSize; 8 | 9 | /** The normalized center of the blur. (0.5, 0.5) by default 10 | */ 11 | @property (readwrite, nonatomic) CGPoint blurCenter; 12 | 13 | @end 14 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/GPUImageiOSBlurFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilterGroup.h" 2 | 3 | @class GPUImageSaturationFilter; 4 | @class GPUImageGaussianBlurFilter; 5 | @class GPUImageLuminanceRangeFilter; 6 | 7 | @interface GPUImageiOSBlurFilter : GPUImageFilterGroup 8 | { 9 | GPUImageSaturationFilter *saturationFilter; 10 | GPUImageGaussianBlurFilter *blurFilter; 11 | GPUImageLuminanceRangeFilter *luminanceRangeFilter; 12 | } 13 | 14 | /** A radius in pixels to use for the blur, with a default of 12.0. This adjusts the sigma variable in the Gaussian distribution function. 
15 | */ 16 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels; 17 | 18 | /** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 0.8 as the normal level 19 | */ 20 | @property (readwrite, nonatomic) CGFloat saturation; 21 | 22 | /** The degree to which to downsample, then upsample the incoming image to minimize computations within the Gaussian blur, default of 4.0 23 | */ 24 | @property (readwrite, nonatomic) CGFloat downsampling; 25 | 26 | 27 | /** The degree to reduce the luminance range, from 0.0 to 1.0. Default is 0.6. 28 | */ 29 | @property (readwrite, nonatomic) CGFloat rangeReductionFactor; 30 | 31 | @end 32 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture+TextureSubimage.h: -------------------------------------------------------------------------------- 1 | // 2 | // GPUImagePicture+TextureSubimage.h 3 | // GPUImage 4 | // 5 | // Created by Jack Wu on 2014-05-28. 6 | // Copyright (c) 2014 Brad Larson. All rights reserved. 7 | // 8 | 9 | #import "GPUImagePicture.h" 10 | 11 | @interface GPUImagePicture (TextureSubimage) 12 | 13 | - (void)replaceTextureWithSubimage:(UIImage*)subimage; 14 | - (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource; 15 | 16 | - (void)replaceTextureWithSubimage:(UIImage*)subimage inRect:(CGRect)subRect; 17 | - (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource inRect:(CGRect)subRect; 18 | 19 | @end 20 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/iOS/GPUImagePicture.h: -------------------------------------------------------------------------------- 1 | #import 2 | #import "GPUImageOutput.h" 3 | 4 | 5 | @interface GPUImagePicture : GPUImageOutput 6 | { 7 | CGSize pixelSizeOfImage; 8 | BOOL hasProcessedImage; 9 | 10 | dispatch_semaphore_t imageUpdateSemaphore; 11 | } 12 | 13 | // Initialization and teardown 14 | - (id)initWithURL:(NSURL *)url; 15 | - (id)initWithImage:(UIImage *)newImageSource; 16 | - (id)initWithCGImage:(CGImageRef)newImageSource; 17 | - (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; 18 | - (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput; 19 | - (id)initWithImage:(UIImage *)newImageSource removePremultiplication:(BOOL)removePremultiplication; 20 | - (id)initWithCGImage:(CGImageRef)newImageSource removePremultiplication:(BOOL)removePremultiplication; 21 | - (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; 22 | - (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication; 23 | 24 | // Image rendering 25 | - (void)processImage; 26 | - (CGSize)outputImageSize; 27 | 28 | /** 29 | * Process image with all targets and filters asynchronously 30 | * The completion handler is called after processing finished in the 31 | * GPU's dispatch queue - and only if this method did not return NO. 
32 | * 33 | * @returns NO if resource is blocked and processing is discarded, YES otherwise 34 | */ 35 | - (BOOL)processImageWithCompletionHandler:(void (^)(void))completion; 36 | - (void)processImageUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage))block; 37 | 38 | @end 39 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/GPUImage/iOS/GPUImageView.h: -------------------------------------------------------------------------------- 1 | #import 2 | #import "GPUImageContext.h" 3 | 4 | typedef NS_ENUM(NSUInteger, GPUImageFillModeType) { 5 | kGPUImageFillModeStretch, // Stretch to fill the full view, which may distort the image outside of its normal aspect ratio 6 | kGPUImageFillModePreserveAspectRatio, // Maintains the aspect ratio of the source image, adding bars of the specified background color 7 | kGPUImageFillModePreserveAspectRatioAndFill // Maintains the aspect ratio of the source image, zooming in on its center to fill the view 8 | }; 9 | 10 | 11 | 12 | /** 13 | UIView subclass to use as an endpoint for displaying GPUImage outputs 14 | */ 15 | @interface GPUImageView : UIView 16 | { 17 | GPUImageRotationMode inputRotation; 18 | } 19 | 20 | /** The fill mode dictates how images are fit in the view, with the default being kGPUImageFillModePreserveAspectRatio 21 | */ 22 | @property(readwrite, nonatomic) GPUImageFillModeType fillMode; 23 | 24 | /** This calculates the current display size, in pixels, taking into account Retina scaling factors 25 | */ 26 | @property(readonly, nonatomic) CGSize sizeInPixels; 27 | 28 | @property(nonatomic) BOOL enabled; 29 | 30 | /** Handling fill mode 31 | 32 | @param redComponent Red component for background color 33 | @param greenComponent Green component for background color 34 | @param blueComponent Blue component for background color 35 | @param alphaComponent Alpha component for background color 36 | */ 37 | - (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent; 38 | 39 | - (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue; 40 | 41 | @end 42 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/pili-librtmp/error.c: -------------------------------------------------------------------------------- 1 | #include "error.h" 2 | #include 3 | #include 4 | 5 | void RTMPError_Alloc(RTMPError *error, size_t msg_size) { 6 | RTMPError_Free(error); 7 | 8 | error->code = 0; 9 | error->message = (char *)malloc(msg_size + 1); 10 | memset(error->message, 0, msg_size); 11 | } 12 | 13 | void RTMPError_Free(RTMPError *error) { 14 | if (error) { 15 | if (error->message) { 16 | free(error->message); 17 | error->message = NULL; 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/pili-librtmp/error.h: -------------------------------------------------------------------------------- 1 | #ifndef __ERROR_H__ 2 | #define __ERROR_H__ 3 | 4 | #include 5 | 6 | typedef struct RTMPError { 7 | int code; 8 | char *message; 9 | } RTMPError; 10 | 11 | void RTMPError_Alloc(RTMPError *error, size_t msg_size); 12 | void RTMPError_Free(RTMPError *error); 13 | 14 | // error defines 15 | enum { 16 | RTMPErrorUnknow = -1, // "Unknow error" 17 | RTMPErrorUnknowOption = -999, // "Unknown option %s" 18 | RTMPErrorAccessDNSFailed = 
-1000, // "Failed to access the DNS. (addr: %s)" 19 | RTMPErrorFailedToConnectSocket = 20 | -1001, // "Failed to connect socket. %d (%s)" 21 | RTMPErrorSocksNegotiationFailed = -1002, // "Socks negotiation failed" 22 | RTMPErrorFailedToCreateSocket = 23 | -1003, // "Failed to create socket. %d (%s)" 24 | RTMPErrorHandshakeFailed = -1004, // "Handshake failed" 25 | RTMPErrorRTMPConnectFailed = -1005, // "RTMP connect failed" 26 | RTMPErrorSendFailed = -1006, // "Send error %d (%s), (%d bytes)" 27 | RTMPErrorServerRequestedClose = -1007, // "RTMP server requested close" 28 | RTMPErrorNetStreamFailed = -1008, // "NetStream failed" 29 | RTMPErrorNetStreamPlayFailed = -1009, // "NetStream play failed" 30 | RTMPErrorNetStreamPlayStreamNotFound = 31 | -1010, // "NetStream play stream not found" 32 | RTMPErrorNetConnectionConnectInvalidApp = 33 | -1011, // "NetConnection connect invalip app" 34 | RTMPErrorSanityFailed = 35 | -1012, // "Sanity failed. Trying to send header of type: 0x%02X" 36 | RTMPErrorSocketClosedByPeer = -1013, // "RTMP socket closed by peer" 37 | RTMPErrorRTMPConnectStreamFailed = -1014, // "RTMP connect stream failed" 38 | RTMPErrorSocketTimeout = -1015, // "RTMP socket timeout" 39 | 40 | // SSL errors 41 | RTMPErrorTLSConnectFailed = -1200, // "TLS_Connect failed" 42 | RTMPErrorNoSSLOrTLSSupport = -1201, // "No SSL/TLS support" 43 | }; 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/pili-librtmp/http.h: -------------------------------------------------------------------------------- 1 | #ifndef __RTMP_HTTP_H__ 2 | #define __RTMP_HTTP_H__ 3 | /* 4 | * Copyright (C) 2010 Howard Chu 5 | * Copyright (C) 2010 Antti Ajanki 6 | * 7 | * This file is part of librtmp. 8 | * 9 | * librtmp is free software; you can redistribute it and/or modify 10 | * it under the terms of the GNU Lesser General Public License as 11 | * published by the Free Software Foundation; either version 2.1, 12 | * or (at your option) any later version. 13 | * 14 | * librtmp is distributed in the hope that it will be useful, 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | * GNU General Public License for more details. 18 | * 19 | * You should have received a copy of the GNU Lesser General Public License 20 | * along with librtmp see the file COPYING. If not, write to 21 | * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, 22 | * Boston, MA 02110-1301, USA. 
23 | * http://www.gnu.org/copyleft/lgpl.html 24 | */ 25 | 26 | typedef enum { 27 | HTTPRES_OK, /* result OK */ 28 | HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */ 29 | HTTPRES_NOT_FOUND, /* not found */ 30 | HTTPRES_BAD_REQUEST, /* client error */ 31 | HTTPRES_SERVER_ERROR, /* server reported an error */ 32 | HTTPRES_REDIRECTED, /* resource has been moved */ 33 | HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */ 34 | } HTTPResult; 35 | 36 | struct HTTP_ctx { 37 | char *date; 38 | int size; 39 | int status; 40 | void *data; 41 | }; 42 | 43 | typedef size_t(HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, 44 | void *stream); 45 | 46 | HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, 47 | HTTP_read_callback *cb); 48 | 49 | #endif 50 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/Vendor/pili-librtmp/log.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008-2009 Andrej Stepanchuk 3 | * Copyright (C) 2009-2010 Howard Chu 4 | * 5 | * This file is part of librtmp. 6 | * 7 | * librtmp is free software; you can redistribute it and/or modify 8 | * it under the terms of the GNU Lesser General Public License as 9 | * published by the Free Software Foundation; either version 2.1, 10 | * or (at your option) any later version. 11 | * 12 | * librtmp is distributed in the hope that it will be useful, 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | * GNU General Public License for more details. 16 | * 17 | * You should have received a copy of the GNU Lesser General Public License 18 | * along with librtmp see the file COPYING. If not, write to 19 | * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, 20 | * Boston, MA 02110-1301, USA. 21 | * http://www.gnu.org/copyleft/lgpl.html 22 | */ 23 | 24 | #ifndef __RTMP_LOG_H__ 25 | #define __RTMP_LOG_H__ 26 | 27 | #include 28 | #include 29 | #include 30 | 31 | #ifdef __cplusplus 32 | extern "C" { 33 | #endif 34 | /* Enable this to get full debugging output */ 35 | /* #define _DEBUG */ 36 | 37 | #ifdef _DEBUG 38 | #undef NODEBUG 39 | #endif 40 | 41 | typedef enum { 42 | RTMP_LOGCRIT = 0, 43 | RTMP_LOGERROR, 44 | RTMP_LOGWARNING, 45 | RTMP_LOGINFO, 46 | RTMP_LOGDEBUG, 47 | RTMP_LOGDEBUG2, 48 | RTMP_LOGALL 49 | } RTMP_LogLevel; 50 | 51 | extern RTMP_LogLevel RTMP_debuglevel; 52 | 53 | typedef void(RTMP_LogCallback)(int level, const char *fmt, va_list); 54 | void RTMP_LogSetCallback(RTMP_LogCallback *cb); 55 | void RTMP_LogSetOutput(FILE *file); 56 | void RTMP_LogPrintf(const char *format, ...); 57 | void RTMP_LogStatus(const char *format, ...); 58 | void RTMP_Log(int level, const char *format, ...); 59 | void RTMP_LogHex(int level, const uint8_t *data, unsigned long len); 60 | void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len); 61 | void RTMP_LogSetLevel(RTMP_LogLevel lvl); 62 | RTMP_LogLevel RTMP_LogGetLevel(void); 63 | 64 | #ifdef __cplusplus 65 | } 66 | #endif 67 | 68 | #endif 69 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/capture/LFAudioCapture.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFAudioCapture.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/1. 6 | // Copyright © 2016年 倾慕. All rights reserved. 
7 | // 8 | 9 | #import 10 | #import 11 | #import "LFLiveAudioConfiguration.h" 12 | 13 | #pragma mark -- AudioCaptureNotification 14 | /** compoentFialed will post the notification */ 15 | extern NSString *_Nullable const LFAudioComponentFailedToCreateNotification; 16 | 17 | @class LFAudioCapture; 18 | /** LFAudioCapture callback audioData */ 19 | @protocol LFAudioCaptureDelegate 20 | - (void)captureOutput:(nullable LFAudioCapture *)capture audioBuffer:(AudioBufferList)inBufferList; 21 | @end 22 | 23 | 24 | @interface LFAudioCapture : NSObject 25 | 26 | #pragma mark - Attribute 27 | ///============================================================================= 28 | /// @name Attribute 29 | ///============================================================================= 30 | 31 | /** The delegate of the capture. captureData callback */ 32 | @property (nullable, nonatomic, weak) id delegate; 33 | 34 | /** The muted control callbackAudioData,muted will memset 0.*/ 35 | @property (nonatomic, assign) BOOL muted; 36 | 37 | /** The running control start capture or stop capture*/ 38 | @property (nonatomic, assign) BOOL running; 39 | 40 | #pragma mark - Initializer 41 | ///============================================================================= 42 | /// @name Initializer 43 | ///============================================================================= 44 | - (nullable instancetype)init UNAVAILABLE_ATTRIBUTE; 45 | + (nullable instancetype)new UNAVAILABLE_ATTRIBUTE; 46 | 47 | /** 48 | The designated initializer. Multiple instances with the same configuration will make the 49 | capture unstable. 50 | */ 51 | - (nullable instancetype)initWithAudioConfiguration:(nullable LFLiveAudioConfiguration *)configuration NS_DESIGNATED_INITIALIZER; 52 | 53 | @end 54 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/H264/LFAVEncoder.h: -------------------------------------------------------------------------------- 1 | // 2 | // AVEncoder.h 3 | // Encoder Demo 4 | // 5 | // Created by Geraint Davies on 14/01/2013. 6 | // Copyright (c) 2013 GDCL http://www.gdcl.co.uk/license.htm 7 | // 8 | 9 | #import 10 | #import 11 | #import 12 | #import 13 | #import 14 | #import "sys/stat.h" 15 | #import "LFVideoEncoder.h" 16 | #import "LFMP4Atom.h" 17 | 18 | typedef int (^encoder_handler_t)(NSArray *data, CMTimeValue ptsValue); 19 | typedef int (^param_handler_t)(NSData *params); 20 | 21 | @interface LFAVEncoder : NSObject 22 | 23 | @property (atomic) NSUInteger bitrate; 24 | 25 | + (LFAVEncoder *)encoderForHeight:(int)height andWidth:(int)width bitrate:(int)bitrate; 26 | 27 | - (void)encodeWithBlock:(encoder_handler_t)block onParams:(param_handler_t)paramsHandler; 28 | - (void)encodeFrame:(CMSampleBufferRef)sampleBuffer; 29 | - (void)encodePixelBuffer:(CVPixelBufferRef)pixelBuffer pts:(CMTime)pts; 30 | - (NSData *)getConfigData; 31 | - (void)shutdown; 32 | 33 | 34 | @property (readonly, atomic) int bitspersecond; 35 | 36 | @end 37 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/H264/LFMP4Atom.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFMP4Atom.h 3 | // Encoder Demo 4 | // 5 | // Created by Geraint Davies on 15/01/2013. 
6 | // Copyright (c) 2013 GDCL http://www.gdcl.co.uk/license.htm 7 | // 8 | 9 | #import 10 | 11 | @interface LFMP4Atom : NSObject 12 | 13 | { 14 | NSFileHandle *_file; 15 | int64_t _offset; 16 | int64_t _length; 17 | OSType _type; 18 | int64_t _nextChild; 19 | } 20 | @property OSType type; 21 | @property int64_t length; 22 | 23 | + (LFMP4Atom *)atomAt:(int64_t)offset size:(int)length type:(OSType)fourcc inFile:(NSFileHandle *)handle; 24 | - (BOOL)init:(int64_t)offset size:(int)length type:(OSType)fourcc inFile:(NSFileHandle *)handle; 25 | - (NSData *)readAt:(int64_t)offset size:(int)length; 26 | - (BOOL)setChildOffset:(int64_t)offset; 27 | - (LFMP4Atom *)nextChild; 28 | - (LFMP4Atom *)childOfType:(OSType)fourcc startAt:(int64_t)offset; 29 | 30 | @end 31 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/H264/LFVideoEncoder.h: -------------------------------------------------------------------------------- 1 | // 2 | // VideoEncoder.h 3 | // Encoder Demo 4 | // 5 | // Created by Geraint Davies on 14/01/2013. 6 | // Copyright (c) 2013 GDCL http://www.gdcl.co.uk/license.htm 7 | // 8 | 9 | #import 10 | #import "AVFoundation/AVAssetWriter.h" 11 | #import "AVFoundation/AVAssetWriterInput.h" 12 | #import "AVFoundation/AVMediaFormat.h" 13 | #import "AVFoundation/AVVideoSettings.h" 14 | 15 | @interface LFVideoEncoder : NSObject 16 | 17 | 18 | @property NSString *path; 19 | @property (nonatomic, readonly) NSUInteger bitrate; 20 | 21 | + (LFVideoEncoder *)encoderForPath:(NSString *)path Height:(int)height andWidth:(int)width bitrate:(int)bitrate; 22 | 23 | - (void)initPath:(NSString *)path Height:(int)height andWidth:(int)width bitrate:(int)bitrate; 24 | - (void)finishWithCompletionHandler:(void (^)(void))handler; 25 | - (BOOL)encodeFrame:(CMSampleBufferRef)sampleBuffer; 26 | 27 | @end 28 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/LFAudioEncoding.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFAudioEncoding.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/2. 6 | // Copyright © 2016年 倾慕. All rights reserved. 7 | // 8 | 9 | #import 10 | #import 11 | #import "LFAudioFrame.h" 12 | #import "LFLiveAudioConfiguration.h" 13 | 14 | @protocol LFAudioEncoding; 15 | /// Callback fired after the encoder has produced an encoded audio frame 16 | @protocol LFAudioEncodingDelegate 17 | @required 18 | - (void)audioEncoder:(nullable id)encoder audioFrame:(nullable LFAudioFrame *)frame; 19 | @end 20 | 21 | /// Abstract audio encoder interface 22 | @protocol LFAudioEncoding 23 | @required 24 | - (void)encodeAudioData:(AudioBufferList)inBufferList timeStamp:(uint64_t)timeStamp; 25 | - (void)stopEncoder; 26 | @optional 27 | - (nullable instancetype)initWithAudioStreamConfiguration:(nullable LFLiveAudioConfiguration *)configuration; 28 | - (void)setDelegate:(nullable id)delegate; 29 | - (nullable NSData *)adtsData:(NSInteger)channel rawDataLength:(NSInteger)rawDataLength; 30 | @end 31 | 32 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/LFH264VideoEncoder.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFH264VideoEncoder 3 | // LFLiveKit 4 | // 5 | // Created by feng on 7/5/16. 6 | // Copyright (c) 2014 zhanqi.tv. All rights reserved.
7 | // 8 | #import "LFVideoEncoding.h" 9 | 10 | @interface LFH264VideoEncoder : NSObject { 11 | 12 | } 13 | 14 | - (void)shutdown; 15 | 16 | @end 17 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/LFHardwareAudioEncoder.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFHardwareAudioEncoder.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/2. 6 | // Copyright © 2016年 倾慕. All rights reserved. 7 | // 8 | 9 | #import "LFAudioEncoding.h" 10 | 11 | @interface LFHardwareAudioEncoder : NSObject 12 | 13 | #pragma mark - Initializer 14 | ///============================================================================= 15 | /// @name Initializer 16 | ///============================================================================= 17 | - (nullable instancetype)init UNAVAILABLE_ATTRIBUTE; 18 | + (nullable instancetype)new UNAVAILABLE_ATTRIBUTE; 19 | 20 | @end 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/LFHardwareVideoEncoder.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFHardwareVideoEncoder.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/2. 6 | // Copyright © 2016年 倾慕. All rights reserved. 7 | // 8 | 9 | #import "LFVideoEncoding.h" 10 | 11 | @interface LFHardwareVideoEncoder : NSObject 12 | 13 | #pragma mark - Initializer 14 | ///============================================================================= 15 | /// @name Initializer 16 | ///============================================================================= 17 | - (nullable instancetype)init UNAVAILABLE_ATTRIBUTE; 18 | + (nullable instancetype)new UNAVAILABLE_ATTRIBUTE; 19 | 20 | @end 21 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/coder/LFVideoEncoding.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFVideoEncoding.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/2. 6 | // Copyright © 2016年 倾慕. All rights reserved. 
7 | // 8 | 9 | #import 10 | #import "LFVideoFrame.h" 11 | #import "LFLiveVideoConfiguration.h" 12 | 13 | @protocol LFVideoEncoding; 14 | /// Callback fired after the encoder has produced an encoded video frame 15 | @protocol LFVideoEncodingDelegate 16 | @required 17 | - (void)videoEncoder:(nullable id)encoder videoFrame:(nullable LFVideoFrame *)frame; 18 | @end 19 | 20 | /// Abstract video encoder interface 21 | @protocol LFVideoEncoding 22 | @required 23 | - (void)encodeVideoData:(nullable CVPixelBufferRef)pixelBuffer timeStamp:(uint64_t)timeStamp; 24 | - (void)stopEncoder; 25 | @optional 26 | @property (nonatomic, assign) NSInteger videoBitRate; 27 | - (nullable instancetype)initWithVideoStreamConfiguration:(nullable LFLiveVideoConfiguration *)configuration; 28 | - (void)setDelegate:(nullable id)delegate; 29 | 30 | @end 31 | 32 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/filter/LFGPUImageBeautyFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface LFGPUImageBeautyFilter : GPUImageFilter { 4 | } 5 | 6 | @property (nonatomic, assign) CGFloat beautyLevel; 7 | @property (nonatomic, assign) CGFloat brightLevel; 8 | @property (nonatomic, assign) CGFloat toneLevel; 9 | @end 10 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/filter/LFGPUImageEmptyFilter.h: -------------------------------------------------------------------------------- 1 | #import "GPUImageFilter.h" 2 | 3 | @interface LFGPUImageEmptyFilter : GPUImageFilter 4 | { 5 | } 6 | 7 | @end 8 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/filter/LFGPUImageEmptyFilter.m: -------------------------------------------------------------------------------- 1 | #import "LFGPUImageEmptyFilter.h" 2 | 3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE 4 | NSString *const kLFGPUImageEmptyFragmentShaderString = SHADER_STRING 5 | ( 6 | varying highp vec2 textureCoordinate; 7 | 8 | uniform sampler2D inputImageTexture; 9 | 10 | void main(){ 11 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 12 | 13 | gl_FragColor = vec4((textureColor.rgb), textureColor.w); 14 | } 15 | 16 | ); 17 | #else 18 | NSString *const kLFGPUImageEmptyFragmentShaderString = SHADER_STRING 19 | ( 20 | varying vec2 textureCoordinate; 21 | 22 | uniform sampler2D inputImageTexture; 23 | 24 | void main(){ 25 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate); 26 | 27 | gl_FragColor = vec4((textureColor.rgb), textureColor.w); 28 | } 29 | 30 | ); 31 | #endif 32 | 33 | @implementation LFGPUImageEmptyFilter 34 | 35 | - (id)init; 36 | { 37 | if (!(self = [super initWithFragmentShaderFromString:kLFGPUImageEmptyFragmentShaderString])) { 38 | return nil; 39 | } 40 | 41 | return self; 42 | } 43 | 44 | @end 45 | 46 | -------------------------------------------------------------------------------- /ios/RCTLFLiveKit/LFLiveKit/objects/LFAudioFrame.h: -------------------------------------------------------------------------------- 1 | // 2 | // LFAudioFrame.h 3 | // LFLiveKit 4 | // 5 | // Created by 倾慕 on 16/5/2. 6 | // Copyright © 2016年 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFFrame.h"
10 |
11 | @interface LFAudioFrame : LFFrame
12 |
13 | /// AAC header used when packing FLV
14 | @property (nonatomic, strong) NSData *audioInfo;
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFAudioFrame.m:
--------------------------------------------------------------------------------
1 | //
2 | // LFAudioFrame.m
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFAudioFrame.h"
10 |
11 | @implementation LFAudioFrame
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFFrame.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFFrame.h
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import <Foundation/Foundation.h>
10 |
11 | @interface LFFrame : NSObject
12 |
13 | @property (nonatomic, assign) uint64_t timestamp;
14 | @property (nonatomic, strong) NSData *data;
15 | ///< FLV or RTMP packet header
16 | @property (nonatomic, strong) NSData *header;
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFFrame.m:
--------------------------------------------------------------------------------
1 | //
2 | // LFFrame.m
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFFrame.h"
10 |
11 | @implementation LFFrame
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFLiveDebug.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFLiveDebug.h
3 | // LaiFeng
4 | //
5 | // Created by admin on 16/5/19.
6 | // Copyright © 2016 live Interactive. All rights reserved.
7 | //
8 |
9 | #import <Foundation/Foundation.h>
10 | #import <UIKit/UIKit.h>
11 |
12 | @interface LFLiveDebug : NSObject
13 |
14 | @property (nonatomic, copy) NSString *streamId; ///< Stream ID
15 | @property (nonatomic, copy) NSString *uploadUrl; ///< Stream (upload) URL
16 | @property (nonatomic, assign) CGSize videoSize; ///< Upload resolution
17 | @property (nonatomic, assign) BOOL isRtmp; ///< Upload method (TCP or RTMP)
18 |
19 | @property (nonatomic, assign) CGFloat elapsedMilli; ///< Time since the last sample, in ms
20 | @property (nonatomic, assign) CGFloat timeStamp; ///< Current timestamp, used to compute per-second statistics
21 | @property (nonatomic, assign) CGFloat dataFlow; ///< Total data volume
22 | @property (nonatomic, assign) CGFloat bandwidth; ///< Total bandwidth within 1 s
23 | @property (nonatomic, assign) CGFloat currentBandwidth; ///< Bandwidth of the previous sample
24 |
25 | @property (nonatomic, assign) NSInteger dropFrame; ///< Number of dropped frames
26 | @property (nonatomic, assign) NSInteger totalFrame; ///< Total number of frames
27 |
28 | @property (nonatomic, assign) NSInteger capturedAudioCount; ///< Audio frames captured within 1 s
29 | @property (nonatomic, assign) NSInteger capturedVideoCount; ///< Video frames captured within 1 s
30 | @property (nonatomic, assign) NSInteger currentCapturedAudioCount; ///< Audio capture count of the previous sample
31 | @property (nonatomic, assign) NSInteger currentCapturedVideoCount; ///< Video capture count of the previous sample
32 |
33 | @property (nonatomic, assign) NSInteger unSendCount; ///< Number of unsent frames (waiting in the current buffer)
34 |
35 | @end
36 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFLiveDebug.m:
--------------------------------------------------------------------------------
1 | //
2 | // LFLiveDebug.m
3 | // LaiFeng
4 | //
5 | // Created by admin on 16/5/19.
6 | // Copyright © 2016 live Interactive. All rights reserved.
7 | //
8 |
9 | #import "LFLiveDebug.h"
10 |
11 | @implementation LFLiveDebug
12 |
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFLiveStreamInfo.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFLiveStreamInfo.h
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | // The actual upload address, token, etc.
8 |
9 | #import <Foundation/Foundation.h>
10 | #import "LFLiveAudioConfiguration.h"
11 | #import "LFLiveVideoConfiguration.h"
12 |
13 | /// Stream state
14 | typedef NS_ENUM (NSUInteger, LFLiveState){
15 |     /// Ready
16 |     LFLiveReady = 0,
17 |     /// Connecting
18 |     LFLivePending = 1,
19 |     /// Connected
20 |     LFLiveStart = 2,
21 |     /// Disconnected
22 |     LFLiveStop = 3,
23 |     /// Connection error
24 |     LFLiveError = 4
25 | };
26 |
27 | typedef NS_ENUM (NSUInteger, LFLiveSocketErrorCode) {
28 |     LFLiveSocketError_PreView = 201,          ///< Preview failed
29 |     LFLiveSocketError_GetStreamInfo = 202,    ///< Failed to get stream info
30 |     LFLiveSocketError_ConnectSocket = 203,    ///< Failed to connect the socket
31 |     LFLiveSocketError_Verification = 204,     ///< Server verification failed
32 |     LFLiveSocketError_ReConnectTimeOut = 205  ///< Timed out while reconnecting to the server
33 | };
34 |
35 | @interface LFLiveStreamInfo : NSObject
36 |
37 | @property (nonatomic, copy) NSString *streamId;
38 |
39 | #pragma mark -- FLV
40 | @property (nonatomic, copy) NSString *host;
41 | @property (nonatomic, assign) NSInteger port;
42 | #pragma mark -- RTMP
43 | @property (nonatomic, copy) NSString *url; ///< Upload URL (all that is needed for RTMP)
44 | /// Audio configuration
45 | @property (nonatomic, strong) LFLiveAudioConfiguration *audioConfiguration;
46 | /// Video configuration
47 | @property (nonatomic, strong) LFLiveVideoConfiguration *videoConfiguration;
48 | /// Whether to drop frames
49 | @property (nonatomic, assign) BOOL needDropFrame;
50 |
51 | @end
52 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFLiveStreamInfo.m:
--------------------------------------------------------------------------------
1 | //
2 | // LFLiveStreamInfo.m
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFLiveStreamInfo.h"
10 |
11 | @implementation LFLiveStreamInfo
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFVideoFrame.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFVideoFrame.h
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFFrame.h"
10 |
11 | @interface LFVideoFrame : LFFrame
12 |
13 | @property (nonatomic, assign) BOOL isKeyFrame;
14 | @property (nonatomic, strong) NSData *sps;
15 | @property (nonatomic, strong) NSData *pps;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/objects/LFVideoFrame.m:
--------------------------------------------------------------------------------
1 | //
2 | // LFVideoFrame.m
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "LFVideoFrame.h"
10 |
11 | @implementation LFVideoFrame
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/publish/LFStreamRTMPSocket.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFStreamRTMPSocket.h
3 | // LaiFeng
4 | //
5 | // Created by admin on 16/5/18.
6 | // Copyright © 2016 live Interactive. All rights reserved.
7 | //
8 |
9 | #import "LFStreamSocket.h"
10 |
11 | @interface LFStreamRTMPSocket : NSObject <LFStreamSocket>
12 |
13 | #pragma mark - Initializer
14 | ///=============================================================================
15 | /// @name Initializer
16 | ///=============================================================================
17 | - (nullable instancetype)init UNAVAILABLE_ATTRIBUTE;
18 | + (nullable instancetype)new UNAVAILABLE_ATTRIBUTE;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/publish/LFStreamSocket.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFStreamSocket.h
3 | // LFLiveKit
4 | //
5 | // Created by admin on 16/5/3.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import <Foundation/Foundation.h>
10 | #import "LFLiveStreamInfo.h"
11 | #import "LFStreamingBuffer.h"
12 | #import "LFLiveDebug.h"
13 |
14 | @protocol LFStreamSocket;
15 | @protocol LFStreamSocketDelegate <NSObject>
16 |
17 | /** callback buffer current status (reports the current buffer state; can drive frame-rate / bit-rate switching strategies) */
18 | - (void)socketBufferStatus:(nullable id <LFStreamSocket>)socket status:(LFLiveBuffferState)status;
19 | /** callback socket current status (reports the current network status) */
20 | - (void)socketStatus:(nullable id <LFStreamSocket>)socket status:(LFLiveState)status;
21 | /** callback socket errorcode */
22 | - (void)socketDidError:(nullable id <LFStreamSocket>)socket errorCode:(LFLiveSocketErrorCode)errorCode;
23 | @optional
24 | /** callback debugInfo */
25 | - (void)socketDebug:(nullable id <LFStreamSocket>)socket debugInfo:(nullable LFLiveDebug *)debugInfo;
26 | @end
27 |
28 | @protocol LFStreamSocket <NSObject>
29 | - (void)start;
30 | - (void)stop;
31 | - (void)sendFrame:(nullable LFFrame *)frame;
32 | - (void)setDelegate:(nullable id <LFStreamSocketDelegate>)delegate;
33 | @optional
34 | - (nullable instancetype)initWithStream:(nullable LFLiveStreamInfo *)stream;
35 | - (nullable instancetype)initWithStream:(nullable LFLiveStreamInfo *)stream reconnectInterval:(NSInteger)reconnectInterval reconnectCount:(NSInteger)reconnectCount;
36 | @end
37 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/publish/LFStreamingBuffer.h:
--------------------------------------------------------------------------------
1 | //
2 | // LFStreamingBuffer.h
3 | // LFLiveKit
4 | //
5 | // Created by 倾慕 on 16/5/2.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import <Foundation/Foundation.h>
10 | #import "LFAudioFrame.h"
11 | #import "LFVideoFrame.h"
12 |
13 | /** current buffer status */
14 | typedef NS_ENUM (NSUInteger, LFLiveBuffferState) {
15 |     LFLiveBuffferUnknown = 0,   //< unknown
16 |     LFLiveBuffferIncrease = 1,  //< buffer is backing up; the bit rate should be lowered
17 |     LFLiveBuffferDecline = 2    //< buffer is draining; the bit rate can be raised
18 | };
19 |
20 | @class LFStreamingBuffer;
21 | /** this callback is used to control videoBitRate */
22 | @protocol LFStreamingBufferDelegate <NSObject>
23 | @optional
24 | /** called when the buffer changes (grows or shrinks); invoked on the buffer's updateInterval */
25 | - (void)streamingBuffer:(nullable LFStreamingBuffer *)buffer bufferState:(LFLiveBuffferState)state;
26 | @end
27 |
28 | @interface LFStreamingBuffer : NSObject
29 |
30 | /** needDropFrame controls dynamic frame dropping; default is YES */
31 | @property (nonatomic, assign) BOOL needDropFrame;
32 |
33 | /** The delegate of the buffer. buffer callback */
34 | @property (nullable, nonatomic, weak) id <LFStreamingBufferDelegate> delegate;
35 |
36 | /** current frame buffer */
37 | @property (nonatomic, strong, readonly) NSMutableArray <LFFrame *> *_Nonnull list;
38 |
39 | /** maximum number of frames held in the buffer; default is 1000 */
40 | @property (nonatomic, assign) NSUInteger maxCount;
41 |
42 | /** number of frames dropped in the last pass */
43 | @property (nonatomic, assign) NSInteger lastDropFrames;
44 |
45 | /** add a frame to the buffer */
46 | - (void)appendObject:(nullable LFFrame *)frame;
47 |
48 | /** pop the first frame from the buffer */
49 | - (nullable LFFrame *)popFirstObject;
50 |
51 | /** remove all objects from the buffer */
52 | - (void)removeAllObject;
53 |
54 | @end
55 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/publish/NSMutableArray+LFAdd.h:
--------------------------------------------------------------------------------
1 | //
2 | // NSMutableArray+LFAdd.h
3 | // YYKit
4 | //
5 | // Created by admin on 16/5/20.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import <Foundation/Foundation.h>
10 |
11 | @interface NSMutableArray (YYAdd)
12 |
13 | /**
14 |  Removes and returns the object with the lowest-valued index in the array.
15 |  If the array is empty, it just returns nil.
16 |
17 |  @return The first object, or nil.
18 |  */
19 | - (nullable id)lfPopFirstObject;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/LFLiveKit/publish/NSMutableArray+LFAdd.m:
--------------------------------------------------------------------------------
1 | //
2 | // NSMutableArray+LFAdd.m
3 | // YYKit
4 | //
5 | // Created by admin on 16/5/20.
6 | // Copyright © 2016 倾慕. All rights reserved.
7 | //
8 |
9 | #import "NSMutableArray+LFAdd.h"
10 |
11 | @implementation NSMutableArray (YYAdd)
12 |
13 | - (void)lfRemoveFirstObject {
14 |     if (self.count) {
15 |         [self removeObjectAtIndex:0];
16 |     }
17 | }
18 |
19 | - (id)lfPopFirstObject {
20 |     id obj = nil;
21 |     if (self.count) {
22 |         obj = self.firstObject;
23 |         [self lfRemoveFirstObject];
24 |     }
25 |     return obj;
26 | }
27 |
28 | @end
29 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/RCTLFLiveKit.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "self:RCTLFLiveKit.xcodeproj">
6 |    </FileRef>
7 | </Workspace>
8 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/RCTLFLiveKit.xcodeproj/xcuserdata/onbing.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>SchemeUserState</key>
6 | 	<dict>
7 | 		<key>RCTLFLiveKit.xcscheme</key>
8 | 		<dict>
9 | 			<key>orderHint</key>
10 | 			<integer>1</integer>
11 | 		</dict>
12 | 	</dict>
13 | 	<key>SuppressBuildableAutocreation</key>
14 | 	<dict>
15 | 		<key>95696ED61D59C63D00FF600F</key>
16 | 		<dict>
17 | 			<key>primary</key>
18 | 			<true/>
19 | 		</dict>
20 | 	</dict>
21 | </dict>
22 | </plist>
23 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/RCTLFLiveKit/RCTStream.h:
--------------------------------------------------------------------------------
1 | //
2 | // RCTStream.h
3 | // RCTLFLiveKit
4 | //
5 |
6 | #import <UIKit/UIKit.h>
7 |
8 | @class RCTStreamManager, RCTBridge;
9 |
10 | @interface RCTStream : UIView
11 |
12 | - (id)initWithManager:(RCTStreamManager *)manager bridge:(RCTBridge *)bridge;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/ios/RCTLFLiveKit/RCTLFLiveKit/RCTStreamManager.h:
--------------------------------------------------------------------------------
1 | //
2 | // RCTStreamManager.h
3 | // RCTLFLiveKit
4 | //
5 |
6 | #import "RCTViewManager.h"
7 |
8 | @interface RCTStreamManager : RCTViewManager
9 |
10 | @end
11 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "rn-live-stream",
3 |   "version": "0.0.1",
4 |   "description": "live video stream for react-native",
5 |   "main": "index.js",
6 |   "scripts": {},
7 |   "repository": {
8 |     "type": "git",
9 |     "url": "git+https://github.com/yanbingbing/rn-live-stream.git"
10 |   },
11 |   "keywords": [
12 |     "react",
13 |     "react-native",
14 |     "react-native-video",
15 |     "react-native-stream",
16 |     "react-native-live-stream"
17 |   ],
18 |   "author": "onbing",
19 |   "license": "MIT",
20 |   "bugs": {
21 |     "url": "https://github.com/yanbingbing/rn-live-stream/issues"
22 |   },
23 |   "homepage": "https://github.com/yanbingbing/rn-live-stream#readme",
24 |   "devDependencies": {
25 |     "react": "^15.3.0",
26 |     "react-native": "^0.30.0"
27 |   }
28 | }
29 |
--------------------------------------------------------------------------------
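Usage note (illustrative sketch, not a file in this repository): the LFStreamingBufferDelegate callback declared in LFStreamingBuffer.h reports buffer pressure, and the optional videoBitRate property in LFVideoEncoding.h is the natural knob to adjust in response. The class name LFBitRateRegulator and the step, floor, and cap bit-rate values below are assumptions chosen purely for illustration.

#import <Foundation/Foundation.h>
#import "LFStreamingBuffer.h"
#import "LFVideoEncoding.h"

// Hypothetical helper: reacts to buffer pressure by nudging the encoder bit rate.
@interface LFBitRateRegulator : NSObject <LFStreamingBufferDelegate>
@property (nonatomic, strong) id<LFVideoEncoding> encoder; // e.g. an LFHardwareVideoEncoder instance
@end

@implementation LFBitRateRegulator

- (void)streamingBuffer:(nullable LFStreamingBuffer *)buffer bufferState:(LFLiveBuffferState)state {
    // videoBitRate is declared @optional in LFVideoEncoding, so guard before touching it.
    if (![self.encoder respondsToSelector:@selector(videoBitRate)] ||
        ![self.encoder respondsToSelector:@selector(setVideoBitRate:)]) {
        return;
    }

    NSInteger bitRate = self.encoder.videoBitRate;
    if (state == LFLiveBuffferIncrease) {
        // Buffer is backing up: step the bit rate down (step size and floor are illustrative).
        bitRate = MAX(bitRate - 100 * 1000, 300 * 1000);
    } else if (state == LFLiveBuffferDecline) {
        // Buffer is draining: step the bit rate back up (cap is illustrative).
        bitRate = MIN(bitRate + 100 * 1000, 1000 * 1000);
    }
    self.encoder.videoBitRate = bitRate;
}

@end

An object like this would be set as the buffer's delegate (see the delegate property in LFStreamingBuffer.h); LFLiveSession wires the equivalent logic internally, so this sketch only illustrates how the pieces declared above fit together.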