├── .DS_Store
├── AWLive.xcodeproj
├── project.pbxproj
├── project.xcworkspace
│ ├── contents.xcworkspacedata
│ ├── xcshareddata
│ │ └── IDEWorkspaceChecks.plist
│ └── xcuserdata
│ │ └── pengchao.xcuserdatad
│ │ ├── IDEFindNavigatorScopes.plist
│ │ └── UserInterfaceState.xcuserstate
└── xcuserdata
│ └── pengchao.xcuserdatad
│ ├── xcdebugger
│ └── Breakpoints_v2.xcbkptlist
│ └── xcschemes
│ └── xcschememanagement.plist
├── AWLive
├── .DS_Store
├── AppDelegate.h
├── AppDelegate.m
├── Assets.xcassets
│ ├── .DS_Store
│ ├── AppIcon.appiconset
│ │ └── Contents.json
│ ├── Contents.json
│ └── image.imageset
│ │ ├── 1f6f9a9064f5411128210318f7262eed.jpeg
│ │ └── Contents.json
├── Base.lproj
│ ├── LaunchScreen.storyboard
│ └── Main.storyboard
├── Info.plist
├── PushStream
│ ├── .DS_Store
│ ├── Capture
│ │ ├── AWAVBaseCapture.h
│ │ ├── AWAVBaseCapture.m
│ │ ├── AWAVCaptureManager.h
│ │ ├── AWAVCaptureManager.m
│ │ ├── AWAVConfig.h
│ │ ├── AWAVConfig.m
│ │ ├── AWFileManager.h
│ │ ├── AWFileManager.m
│ │ ├── AWGPUImageAVCapture.h
│ │ ├── AWGPUImageAVCapture.m
│ │ ├── AWSystemAVCapture.h
│ │ ├── AWSystemAVCapture.m
│ │ ├── AWSystemPreview.h
│ │ └── AWSystemPreview.m
│ ├── Encoder
│ │ ├── .DS_Store
│ │ ├── AWEncoderManager.h
│ │ ├── AWEncoderManager.m
│ │ ├── Base
│ │ │ ├── AWAudioEncoder.h
│ │ │ ├── AWAudioEncoder.m
│ │ │ ├── AWBaseEncoder.h
│ │ │ ├── AWBaseEncoder.m
│ │ │ ├── AWVideoEncoder.h
│ │ │ └── AWVideoEncoder.m
│ │ ├── HW
│ │ │ ├── AWHWAACEncoder.h
│ │ │ ├── AWHWAACEncoder.m
│ │ │ ├── AWHWH264Encoder.h
│ │ │ └── AWHWH264Encoder.m
│ │ └── SW
│ │ │ ├── AWSWFaacEncoder.h
│ │ │ ├── AWSWFaacEncoder.m
│ │ │ ├── AWSWX264Encoder.h
│ │ │ └── AWSWX264Encoder.m
│ └── GpuImage
│ │ ├── AWGPUImageVideoCamera.h
│ │ ├── AWGPUImageVideoCamera.m
│ │ ├── GPUImageBeautifyFilter.h
│ │ └── GPUImageBeautifyFilter.m
├── Test
│ ├── AWLiveCapture.h
│ └── AWLiveCapture.m
├── ViewController.h
├── ViewController.m
├── camera_switch.png
└── main.m
├── GPUImage
├── .DS_Store
├── GPUImage.xcodeproj
│ ├── project.pbxproj
│ ├── project.xcworkspace
│ │ └── contents.xcworkspacedata
│ ├── xcshareddata
│ │ └── xcschemes
│ │ │ ├── Documentation.xcscheme
│ │ │ ├── GPUImage.xcscheme
│ │ │ └── GPUImageFramework.xcscheme
│ └── xcuserdata
│ │ └── pengchao.xcuserdatad
│ │ └── xcschemes
│ │ └── xcschememanagement.plist
├── GPUImageMac.xcodeproj
│ ├── project.pbxproj
│ └── xcshareddata
│ │ └── xcschemes
│ │ └── GPUImage.xcscheme
├── Resources
│ ├── lookup.png
│ ├── lookup_amatorka.png
│ ├── lookup_miss_etikate.png
│ ├── lookup_soft_elegance_1.png
│ └── lookup_soft_elegance_2.png
└── Source
│ ├── .DS_Store
│ ├── GLProgram.h
│ ├── GLProgram.m
│ ├── GPUImage.h
│ ├── GPUImage3x3ConvolutionFilter.h
│ ├── GPUImage3x3ConvolutionFilter.m
│ ├── GPUImage3x3TextureSamplingFilter.h
│ ├── GPUImage3x3TextureSamplingFilter.m
│ ├── GPUImageAdaptiveThresholdFilter.h
│ ├── GPUImageAdaptiveThresholdFilter.m
│ ├── GPUImageAddBlendFilter.h
│ ├── GPUImageAddBlendFilter.m
│ ├── GPUImageAlphaBlendFilter.h
│ ├── GPUImageAlphaBlendFilter.m
│ ├── GPUImageAmatorkaFilter.h
│ ├── GPUImageAmatorkaFilter.m
│ ├── GPUImageAverageColor.h
│ ├── GPUImageAverageColor.m
│ ├── GPUImageAverageLuminanceThresholdFilter.h
│ ├── GPUImageAverageLuminanceThresholdFilter.m
│ ├── GPUImageBilateralFilter.h
│ ├── GPUImageBilateralFilter.m
│ ├── GPUImageBoxBlurFilter.h
│ ├── GPUImageBoxBlurFilter.m
│ ├── GPUImageBrightnessFilter.h
│ ├── GPUImageBrightnessFilter.m
│ ├── GPUImageBuffer.h
│ ├── GPUImageBuffer.m
│ ├── GPUImageBulgeDistortionFilter.h
│ ├── GPUImageBulgeDistortionFilter.m
│ ├── GPUImageCGAColorspaceFilter.h
│ ├── GPUImageCGAColorspaceFilter.m
│ ├── GPUImageCannyEdgeDetectionFilter.h
│ ├── GPUImageCannyEdgeDetectionFilter.m
│ ├── GPUImageChromaKeyBlendFilter.h
│ ├── GPUImageChromaKeyBlendFilter.m
│ ├── GPUImageChromaKeyFilter.h
│ ├── GPUImageChromaKeyFilter.m
│ ├── GPUImageClosingFilter.h
│ ├── GPUImageClosingFilter.m
│ ├── GPUImageColorBlendFilter.h
│ ├── GPUImageColorBlendFilter.m
│ ├── GPUImageColorBurnBlendFilter.h
│ ├── GPUImageColorBurnBlendFilter.m
│ ├── GPUImageColorConversion.h
│ ├── GPUImageColorConversion.m
│ ├── GPUImageColorDodgeBlendFilter.h
│ ├── GPUImageColorDodgeBlendFilter.m
│ ├── GPUImageColorInvertFilter.h
│ ├── GPUImageColorInvertFilter.m
│ ├── GPUImageColorLocalBinaryPatternFilter.h
│ ├── GPUImageColorLocalBinaryPatternFilter.m
│ ├── GPUImageColorMatrixFilter.h
│ ├── GPUImageColorMatrixFilter.m
│ ├── GPUImageColorPackingFilter.h
│ ├── GPUImageColorPackingFilter.m
│ ├── GPUImageColourFASTFeatureDetector.h
│ ├── GPUImageColourFASTFeatureDetector.m
│ ├── GPUImageColourFASTSamplingOperation.h
│ ├── GPUImageColourFASTSamplingOperation.m
│ ├── GPUImageContrastFilter.h
│ ├── GPUImageContrastFilter.m
│ ├── GPUImageCropFilter.h
│ ├── GPUImageCropFilter.m
│ ├── GPUImageCrosshairGenerator.h
│ ├── GPUImageCrosshairGenerator.m
│ ├── GPUImageCrosshatchFilter.h
│ ├── GPUImageCrosshatchFilter.m
│ ├── GPUImageDarkenBlendFilter.h
│ ├── GPUImageDarkenBlendFilter.m
│ ├── GPUImageDifferenceBlendFilter.h
│ ├── GPUImageDifferenceBlendFilter.m
│ ├── GPUImageDilationFilter.h
│ ├── GPUImageDilationFilter.m
│ ├── GPUImageDirectionalNonMaximumSuppressionFilter.h
│ ├── GPUImageDirectionalNonMaximumSuppressionFilter.m
│ ├── GPUImageDirectionalSobelEdgeDetectionFilter.h
│ ├── GPUImageDirectionalSobelEdgeDetectionFilter.m
│ ├── GPUImageDissolveBlendFilter.h
│ ├── GPUImageDissolveBlendFilter.m
│ ├── GPUImageDivideBlendFilter.h
│ ├── GPUImageDivideBlendFilter.m
│ ├── GPUImageEmbossFilter.h
│ ├── GPUImageEmbossFilter.m
│ ├── GPUImageErosionFilter.h
│ ├── GPUImageErosionFilter.m
│ ├── GPUImageExclusionBlendFilter.h
│ ├── GPUImageExclusionBlendFilter.m
│ ├── GPUImageExposureFilter.h
│ ├── GPUImageExposureFilter.m
│ ├── GPUImageFASTCornerDetectionFilter.h
│ ├── GPUImageFASTCornerDetectionFilter.m
│ ├── GPUImageFalseColorFilter.h
│ ├── GPUImageFalseColorFilter.m
│ ├── GPUImageFilter.h
│ ├── GPUImageFilter.m
│ ├── GPUImageFilterGroup.h
│ ├── GPUImageFilterGroup.m
│ ├── GPUImageFilterPipeline.h
│ ├── GPUImageFilterPipeline.m
│ ├── GPUImageFourInputFilter.h
│ ├── GPUImageFourInputFilter.m
│ ├── GPUImageFramebuffer.h
│ ├── GPUImageFramebuffer.m
│ ├── GPUImageFramebufferCache.h
│ ├── GPUImageFramebufferCache.m
│ ├── GPUImageGammaFilter.h
│ ├── GPUImageGammaFilter.m
│ ├── GPUImageGaussianBlurFilter.h
│ ├── GPUImageGaussianBlurFilter.m
│ ├── GPUImageGaussianBlurPositionFilter.h
│ ├── GPUImageGaussianBlurPositionFilter.m
│ ├── GPUImageGaussianSelectiveBlurFilter.h
│ ├── GPUImageGaussianSelectiveBlurFilter.m
│ ├── GPUImageGlassSphereFilter.h
│ ├── GPUImageGlassSphereFilter.m
│ ├── GPUImageGrayscaleFilter.h
│ ├── GPUImageGrayscaleFilter.m
│ ├── GPUImageHSBFilter.h
│ ├── GPUImageHSBFilter.m
│ ├── GPUImageHalftoneFilter.h
│ ├── GPUImageHalftoneFilter.m
│ ├── GPUImageHardLightBlendFilter.h
│ ├── GPUImageHardLightBlendFilter.m
│ ├── GPUImageHarrisCornerDetectionFilter.h
│ ├── GPUImageHarrisCornerDetectionFilter.m
│ ├── GPUImageHazeFilter.h
│ ├── GPUImageHazeFilter.m
│ ├── GPUImageHighPassFilter.h
│ ├── GPUImageHighPassFilter.m
│ ├── GPUImageHighlightShadowFilter.h
│ ├── GPUImageHighlightShadowFilter.m
│ ├── GPUImageHighlightShadowTintFilter.h
│ ├── GPUImageHighlightShadowTintFilter.m
│ ├── GPUImageHistogramEqualizationFilter.h
│ ├── GPUImageHistogramEqualizationFilter.m
│ ├── GPUImageHistogramFilter.h
│ ├── GPUImageHistogramFilter.m
│ ├── GPUImageHistogramGenerator.h
│ ├── GPUImageHistogramGenerator.m
│ ├── GPUImageHoughTransformLineDetector.h
│ ├── GPUImageHoughTransformLineDetector.m
│ ├── GPUImageHueBlendFilter.h
│ ├── GPUImageHueBlendFilter.m
│ ├── GPUImageHueFilter.h
│ ├── GPUImageHueFilter.m
│ ├── GPUImageJFAVoronoiFilter.h
│ ├── GPUImageJFAVoronoiFilter.m
│ ├── GPUImageKuwaharaFilter.h
│ ├── GPUImageKuwaharaFilter.m
│ ├── GPUImageKuwaharaRadius3Filter.h
│ ├── GPUImageKuwaharaRadius3Filter.m
│ ├── GPUImageLanczosResamplingFilter.h
│ ├── GPUImageLanczosResamplingFilter.m
│ ├── GPUImageLaplacianFilter.h
│ ├── GPUImageLaplacianFilter.m
│ ├── GPUImageLevelsFilter.h
│ ├── GPUImageLevelsFilter.m
│ ├── GPUImageLightenBlendFilter.h
│ ├── GPUImageLightenBlendFilter.m
│ ├── GPUImageLineGenerator.h
│ ├── GPUImageLineGenerator.m
│ ├── GPUImageLinearBurnBlendFilter.h
│ ├── GPUImageLinearBurnBlendFilter.m
│ ├── GPUImageLocalBinaryPatternFilter.h
│ ├── GPUImageLocalBinaryPatternFilter.m
│ ├── GPUImageLookupFilter.h
│ ├── GPUImageLookupFilter.m
│ ├── GPUImageLowPassFilter.h
│ ├── GPUImageLowPassFilter.m
│ ├── GPUImageLuminanceRangeFilter.h
│ ├── GPUImageLuminanceRangeFilter.m
│ ├── GPUImageLuminanceThresholdFilter.h
│ ├── GPUImageLuminanceThresholdFilter.m
│ ├── GPUImageLuminosity.h
│ ├── GPUImageLuminosity.m
│ ├── GPUImageLuminosityBlendFilter.h
│ ├── GPUImageLuminosityBlendFilter.m
│ ├── GPUImageMaskFilter.h
│ ├── GPUImageMaskFilter.m
│ ├── GPUImageMedianFilter.h
│ ├── GPUImageMedianFilter.m
│ ├── GPUImageMissEtikateFilter.h
│ ├── GPUImageMissEtikateFilter.m
│ ├── GPUImageMonochromeFilter.h
│ ├── GPUImageMonochromeFilter.m
│ ├── GPUImageMosaicFilter.h
│ ├── GPUImageMosaicFilter.m
│ ├── GPUImageMotionBlurFilter.h
│ ├── GPUImageMotionBlurFilter.m
│ ├── GPUImageMotionDetector.h
│ ├── GPUImageMotionDetector.m
│ ├── GPUImageMovie.h
│ ├── GPUImageMovie.m
│ ├── GPUImageMovieComposition.h
│ ├── GPUImageMovieComposition.m
│ ├── GPUImageMultiplyBlendFilter.h
│ ├── GPUImageMultiplyBlendFilter.m
│ ├── GPUImageNobleCornerDetectionFilter.h
│ ├── GPUImageNobleCornerDetectionFilter.m
│ ├── GPUImageNonMaximumSuppressionFilter.h
│ ├── GPUImageNonMaximumSuppressionFilter.m
│ ├── GPUImageNormalBlendFilter.h
│ ├── GPUImageNormalBlendFilter.m
│ ├── GPUImageOpacityFilter.h
│ ├── GPUImageOpacityFilter.m
│ ├── GPUImageOpeningFilter.h
│ ├── GPUImageOpeningFilter.m
│ ├── GPUImageOutput.h
│ ├── GPUImageOutput.m
│ ├── GPUImageOverlayBlendFilter.h
│ ├── GPUImageOverlayBlendFilter.m
│ ├── GPUImageParallelCoordinateLineTransformFilter.h
│ ├── GPUImageParallelCoordinateLineTransformFilter.m
│ ├── GPUImagePerlinNoiseFilter.h
│ ├── GPUImagePerlinNoiseFilter.m
│ ├── GPUImagePinchDistortionFilter.h
│ ├── GPUImagePinchDistortionFilter.m
│ ├── GPUImagePixellateFilter.h
│ ├── GPUImagePixellateFilter.m
│ ├── GPUImagePixellatePositionFilter.h
│ ├── GPUImagePixellatePositionFilter.m
│ ├── GPUImagePoissonBlendFilter.h
│ ├── GPUImagePoissonBlendFilter.m
│ ├── GPUImagePolarPixellateFilter.h
│ ├── GPUImagePolarPixellateFilter.m
│ ├── GPUImagePolkaDotFilter.h
│ ├── GPUImagePolkaDotFilter.m
│ ├── GPUImagePosterizeFilter.h
│ ├── GPUImagePosterizeFilter.m
│ ├── GPUImagePrewittEdgeDetectionFilter.h
│ ├── GPUImagePrewittEdgeDetectionFilter.m
│ ├── GPUImageRGBClosingFilter.h
│ ├── GPUImageRGBClosingFilter.m
│ ├── GPUImageRGBDilationFilter.h
│ ├── GPUImageRGBDilationFilter.m
│ ├── GPUImageRGBErosionFilter.h
│ ├── GPUImageRGBErosionFilter.m
│ ├── GPUImageRGBFilter.h
│ ├── GPUImageRGBFilter.m
│ ├── GPUImageRGBOpeningFilter.h
│ ├── GPUImageRGBOpeningFilter.m
│ ├── GPUImageRawDataInput.h
│ ├── GPUImageRawDataInput.m
│ ├── GPUImageRawDataOutput.h
│ ├── GPUImageRawDataOutput.m
│ ├── GPUImageSaturationBlendFilter.h
│ ├── GPUImageSaturationBlendFilter.m
│ ├── GPUImageSaturationFilter.h
│ ├── GPUImageSaturationFilter.m
│ ├── GPUImageScreenBlendFilter.h
│ ├── GPUImageScreenBlendFilter.m
│ ├── GPUImageSepiaFilter.h
│ ├── GPUImageSepiaFilter.m
│ ├── GPUImageSharpenFilter.h
│ ├── GPUImageSharpenFilter.m
│ ├── GPUImageShiTomasiFeatureDetectionFilter.h
│ ├── GPUImageShiTomasiFeatureDetectionFilter.m
│ ├── GPUImageSingleComponentGaussianBlurFilter.h
│ ├── GPUImageSingleComponentGaussianBlurFilter.m
│ ├── GPUImageSketchFilter.h
│ ├── GPUImageSketchFilter.m
│ ├── GPUImageSkinToneFilter.h
│ ├── GPUImageSkinToneFilter.m
│ ├── GPUImageSmoothToonFilter.h
│ ├── GPUImageSmoothToonFilter.m
│ ├── GPUImageSobelEdgeDetectionFilter.h
│ ├── GPUImageSobelEdgeDetectionFilter.m
│ ├── GPUImageSoftEleganceFilter.h
│ ├── GPUImageSoftEleganceFilter.m
│ ├── GPUImageSoftLightBlendFilter.h
│ ├── GPUImageSoftLightBlendFilter.m
│ ├── GPUImageSolarizeFilter.h
│ ├── GPUImageSolarizeFilter.m
│ ├── GPUImageSolidColorGenerator.h
│ ├── GPUImageSolidColorGenerator.m
│ ├── GPUImageSourceOverBlendFilter.h
│ ├── GPUImageSourceOverBlendFilter.m
│ ├── GPUImageSphereRefractionFilter.h
│ ├── GPUImageSphereRefractionFilter.m
│ ├── GPUImageStillCamera.h
│ ├── GPUImageStillCamera.m
│ ├── GPUImageStretchDistortionFilter.h
│ ├── GPUImageStretchDistortionFilter.m
│ ├── GPUImageSubtractBlendFilter.h
│ ├── GPUImageSubtractBlendFilter.m
│ ├── GPUImageSwirlFilter.h
│ ├── GPUImageSwirlFilter.m
│ ├── GPUImageTextureInput.h
│ ├── GPUImageTextureInput.m
│ ├── GPUImageTextureOutput.h
│ ├── GPUImageTextureOutput.m
│ ├── GPUImageThreeInputFilter.h
│ ├── GPUImageThreeInputFilter.m
│ ├── GPUImageThresholdEdgeDetectionFilter.h
│ ├── GPUImageThresholdEdgeDetectionFilter.m
│ ├── GPUImageThresholdSketchFilter.h
│ ├── GPUImageThresholdSketchFilter.m
│ ├── GPUImageThresholdedNonMaximumSuppressionFilter.h
│ ├── GPUImageThresholdedNonMaximumSuppressionFilter.m
│ ├── GPUImageTiltShiftFilter.h
│ ├── GPUImageTiltShiftFilter.m
│ ├── GPUImageToneCurveFilter.h
│ ├── GPUImageToneCurveFilter.m
│ ├── GPUImageToonFilter.h
│ ├── GPUImageToonFilter.m
│ ├── GPUImageTransformFilter.h
│ ├── GPUImageTransformFilter.m
│ ├── GPUImageTwoInputCrossTextureSamplingFilter.h
│ ├── GPUImageTwoInputCrossTextureSamplingFilter.m
│ ├── GPUImageTwoInputFilter.h
│ ├── GPUImageTwoInputFilter.m
│ ├── GPUImageTwoPassFilter.h
│ ├── GPUImageTwoPassFilter.m
│ ├── GPUImageTwoPassTextureSamplingFilter.h
│ ├── GPUImageTwoPassTextureSamplingFilter.m
│ ├── GPUImageUIElement.h
│ ├── GPUImageUIElement.m
│ ├── GPUImageUnsharpMaskFilter.h
│ ├── GPUImageUnsharpMaskFilter.m
│ ├── GPUImageVibranceFilter.h
│ ├── GPUImageVibranceFilter.m
│ ├── GPUImageVideoCamera.h
│ ├── GPUImageVideoCamera.m
│ ├── GPUImageVignetteFilter.h
│ ├── GPUImageVignetteFilter.m
│ ├── GPUImageVoronoiConsumerFilter.h
│ ├── GPUImageVoronoiConsumerFilter.m
│ ├── GPUImageWeakPixelInclusionFilter.h
│ ├── GPUImageWeakPixelInclusionFilter.m
│ ├── GPUImageWhiteBalanceFilter.h
│ ├── GPUImageWhiteBalanceFilter.m
│ ├── GPUImageXYDerivativeFilter.h
│ ├── GPUImageXYDerivativeFilter.m
│ ├── GPUImageZoomBlurFilter.h
│ ├── GPUImageZoomBlurFilter.m
│ ├── GPUImageiOSBlurFilter.h
│ ├── GPUImageiOSBlurFilter.m
│ ├── Mac
│ ├── GPUImage.h
│ ├── GPUImageAVCamera.h
│ ├── GPUImageAVCamera.m
│ ├── GPUImageContext.h
│ ├── GPUImageContext.m
│ ├── GPUImageMac-Info.plist
│ ├── GPUImageMac-Prefix.pch
│ ├── GPUImageMovieWriter.h
│ ├── GPUImageMovieWriter.m
│ ├── GPUImagePicture.h
│ ├── GPUImagePicture.m
│ ├── GPUImageView.h
│ ├── GPUImageView.m
│ └── en.lproj
│ │ └── InfoPlist.strings
│ └── iOS
│ ├── Framework
│ ├── GPUImageFramework.h
│ ├── Info.plist
│ └── module.modulemap
│ ├── GPUImage-Prefix.pch
│ ├── GPUImageContext.h
│ ├── GPUImageContext.m
│ ├── GPUImageMovieWriter.h
│ ├── GPUImageMovieWriter.m
│ ├── GPUImagePicture+TextureSubimage.h
│ ├── GPUImagePicture+TextureSubimage.m
│ ├── GPUImagePicture.h
│ ├── GPUImagePicture.m
│ ├── GPUImageView.h
│ └── GPUImageView.m
├── LICENSE
├── README.md
└── clibs
├── .DS_Store
├── 3th-party
├── .DS_Store
├── libfaac
│ ├── include
│ │ ├── faac.h
│ │ └── faaccfg.h
│ └── lib
│ │ └── libfaac.a
├── librtmp
│ ├── include
│ │ ├── amf.h
│ │ ├── http.h
│ │ ├── log.h
│ │ └── rtmp.h
│ └── lib
│ │ └── librtmp.a
├── libx264
│ ├── include
│ │ ├── x264.h
│ │ └── x264_config.h
│ └── lib
│ │ └── libx264.a
└── libyuv
│ ├── .DS_Store
│ ├── include
│ ├── libyuv.h
│ └── libyuv
│ │ ├── .svn
│ │ ├── all-wcprops
│ │ ├── entries
│ │ ├── prop-base
│ │ │ ├── basic_types.h.svn-base
│ │ │ ├── compare.h.svn-base
│ │ │ ├── convert.h.svn-base
│ │ │ ├── convert_from.h.svn-base
│ │ │ ├── cpu_id.h.svn-base
│ │ │ ├── format_conversion.h.svn-base
│ │ │ ├── mjpeg_decoder.h.svn-base
│ │ │ ├── planar_functions.h.svn-base
│ │ │ ├── scale.h.svn-base
│ │ │ └── version.h.svn-base
│ │ └── text-base
│ │ │ ├── basic_types.h.svn-base
│ │ │ ├── compare.h.svn-base
│ │ │ ├── convert.h.svn-base
│ │ │ ├── convert_argb.h.svn-base
│ │ │ ├── convert_from.h.svn-base
│ │ │ ├── convert_from_argb.h.svn-base
│ │ │ ├── cpu_id.h.svn-base
│ │ │ ├── format_conversion.h.svn-base
│ │ │ ├── mjpeg_decoder.h.svn-base
│ │ │ ├── planar_functions.h.svn-base
│ │ │ ├── rotate.h.svn-base
│ │ │ ├── rotate_argb.h.svn-base
│ │ │ ├── row.h.svn-base
│ │ │ ├── scale.h.svn-base
│ │ │ ├── scale_argb.h.svn-base
│ │ │ ├── version.h.svn-base
│ │ │ └── video_common.h.svn-base
│ │ ├── basic_types.h
│ │ ├── compare.h
│ │ ├── convert.h
│ │ ├── convert_argb.h
│ │ ├── convert_from.h
│ │ ├── convert_from_argb.h
│ │ ├── cpu_id.h
│ │ ├── format_conversion.h
│ │ ├── mjpeg_decoder.h
│ │ ├── planar_functions.h
│ │ ├── rotate.h
│ │ ├── rotate_argb.h
│ │ ├── row.h
│ │ ├── scale.h
│ │ ├── scale_argb.h
│ │ ├── scale_row.h
│ │ ├── version.h
│ │ └── video_common.h
│ └── lib
│ └── libyuv.a
└── libaw
├── .DS_Store
├── aw_all.h
├── common
├── aw_alloc.c
├── aw_alloc.h
├── aw_array.c
├── aw_array.h
├── aw_common.h
├── aw_data.c
├── aw_data.h
├── aw_dict.c
├── aw_dict.h
├── aw_file.c
├── aw_file.h
├── aw_rtmp.c
├── aw_rtmp.h
├── aw_thread_poll.c
├── aw_thread_poll.h
└── aw_utils.h
└── pushStream
├── .DS_Store
├── encoder
├── aw_faac.c
├── aw_faac.h
├── aw_sw_faac_encoder.c
├── aw_sw_faac_encoder.h
├── aw_sw_x264_encoder.c
├── aw_sw_x264_encoder.h
├── aw_x264.c
└── aw_x264.h
├── flv
├── aw_encode_flv.c
└── aw_encode_flv.h
└── rtmp
├── aw_streamer.c
└── aw_streamer.h
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/.DS_Store
--------------------------------------------------------------------------------
/AWLive.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 |
2 |
4 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/AWLive.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | IDEDidComputeMac32BitWarning
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/AWLive.xcodeproj/project.xcworkspace/xcuserdata/pengchao.xcuserdatad/IDEFindNavigatorScopes.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/AWLive.xcodeproj/project.xcworkspace/xcuserdata/pengchao.xcuserdatad/UserInterfaceState.xcuserstate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive.xcodeproj/project.xcworkspace/xcuserdata/pengchao.xcuserdatad/UserInterfaceState.xcuserstate
--------------------------------------------------------------------------------
/AWLive.xcodeproj/xcuserdata/pengchao.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | AWLive.xcscheme_^#shared#^_
8 |
9 | orderHint
10 | 3
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/AWLive/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/.DS_Store
--------------------------------------------------------------------------------
/AWLive/AppDelegate.h:
--------------------------------------------------------------------------------
1 | //
2 | // AppDelegate.h
3 | // AWLive
4 | //
5 | // Created by xxx on 5/11/16.
6 | //
7 | //
8 |
9 | #import <UIKit/UIKit.h>
10 |
11 | @interface AppDelegate : UIResponder <UIApplicationDelegate>
12 |
13 | @property (strong, nonatomic) UIWindow *window;
14 |
15 |
16 | @end
17 |
18 |
--------------------------------------------------------------------------------
/AWLive/Assets.xcassets/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/Assets.xcassets/.DS_Store
--------------------------------------------------------------------------------
/AWLive/Assets.xcassets/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "info" : {
3 | "author" : "xcode",
4 | "version" : 1
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/AWLive/Assets.xcassets/image.imageset/1f6f9a9064f5411128210318f7262eed.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/Assets.xcassets/image.imageset/1f6f9a9064f5411128210318f7262eed.jpeg
--------------------------------------------------------------------------------
/AWLive/Assets.xcassets/image.imageset/Contents.json:
--------------------------------------------------------------------------------
1 | {
2 | "images" : [
3 | {
4 | "idiom" : "universal",
5 | "scale" : "1x"
6 | },
7 | {
8 | "filename" : "1f6f9a9064f5411128210318f7262eed.jpeg",
9 | "idiom" : "universal",
10 | "scale" : "2x"
11 | },
12 | {
13 | "idiom" : "universal",
14 | "scale" : "3x"
15 | }
16 | ],
17 | "info" : {
18 | "author" : "xcode",
19 | "version" : 1
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/AWLive/Base.lproj/Main.storyboard:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/AWLive/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleDevelopmentRegion
6 | en
7 | CFBundleExecutable
8 | $(EXECUTABLE_NAME)
9 | CFBundleIdentifier
10 | $(PRODUCT_BUNDLE_IDENTIFIER)
11 | CFBundleInfoDictionaryVersion
12 | 6.0
13 | CFBundleName
14 | $(PRODUCT_NAME)
15 | CFBundlePackageType
16 | APPL
17 | CFBundleShortVersionString
18 | 1.0
19 | CFBundleVersion
20 | 1
21 | LSRequiresIPhoneOS
22 |
23 | NSCameraUsageDescription
24 |
25 | NSMicrophoneUsageDescription
26 |
27 | UILaunchStoryboardName
28 | LaunchScreen
29 | UIMainStoryboardFile
30 | Main
31 | UIRequiredDeviceCapabilities
32 |
33 | armv7
34 |
35 | UISupportedInterfaceOrientations
36 |
37 | UIInterfaceOrientationPortrait
38 |
39 | UISupportedInterfaceOrientations~ipad
40 |
41 | UIInterfaceOrientationPortrait
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/AWLive/PushStream/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/PushStream/.DS_Store
--------------------------------------------------------------------------------
/AWLive/PushStream/Capture/AWAVCaptureManager.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Creates the AVCapture instance; supports different ways of capturing video.
5 | */
6 |
7 | #import <Foundation/Foundation.h>
8 | #import "AWAVBaseCapture.h"
9 | #import "AWEncoderManager.h"
10 |
11 | typedef enum : NSUInteger {
12 | AWAVCaptureTypeNone,
13 | AWAVCaptureTypeSystem,
14 | AWAVCaptureTypeGPUImage,
15 | } AWAVCaptureType;
16 |
17 | @interface AWAVCaptureManager : NSObject
18 | // Video capture type
19 | @property (nonatomic, unsafe_unretained) AWAVCaptureType captureType;
20 | @property (nonatomic, weak) AWAVBaseCapture *avCapture;
21 |
22 | // Encoder types
23 | @property (nonatomic, unsafe_unretained) AWAudioEncoderType audioEncoderType;
24 | @property (nonatomic, unsafe_unretained) AWVideoEncoderType videoEncoderType;
25 |
26 | // Configuration
27 | @property (nonatomic, strong) AWAudioConfig *audioConfig;
28 | @property (nonatomic, strong) AWVideoConfig *videoConfig;
29 | @end
30 |
--------------------------------------------------------------------------------
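Going only by the properties declared in this header, a minimal configuration sketch might look like the following; the helper name is illustrative, and how capture is actually started afterwards lives elsewhere in the project.

#import "AWAVCaptureManager.h"
#import "AWAVConfig.h"

static AWAVCaptureManager *MakeCaptureManager(void) {
    AWAVCaptureManager *manager = [[AWAVCaptureManager alloc] init];
    manager.captureType = AWAVCaptureTypeGPUImage;          // GPUImage capture (filters) vs. plain system capture
    manager.audioEncoderType = AWAudioEncoderTypeHWAACLC;   // hardware AAC-LC
    manager.videoEncoderType = AWVideoEncoderTypeHWH264;    // hardware H.264
    manager.audioConfig = [[AWAudioConfig alloc] init];     // tunable fields: see AWAVConfig.h below
    manager.videoConfig = [[AWVideoConfig alloc] init];
    return manager;
}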
/AWLive/PushStream/Capture/AWAVConfig.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Audio/video configuration. Some values have a fixed set of valid options and cannot be set arbitrarily.
5 | */
6 |
7 | #import <Foundation/Foundation.h>
8 | #import <UIKit/UIKit.h>
9 | #import "aw_all.h"
10 |
11 | @interface AWAudioConfig : NSObject
12 | @property (nonatomic, unsafe_unretained) NSInteger bitrate; //freely configurable
13 | @property (nonatomic, unsafe_unretained) NSInteger channelCount; //1 or 2
14 | @property (nonatomic, unsafe_unretained) NSInteger sampleRate; //44100, 22050, 11025 or 5500
15 | @property (nonatomic, unsafe_unretained) NSInteger sampleSize; //16 or 8
16 |
17 | @property (nonatomic, readonly, unsafe_unretained) aw_faac_config faacConfig;
18 | @end
19 |
20 | @interface AWVideoConfig : NSObject
21 | @property (nonatomic, unsafe_unretained) NSInteger width; //capture width; must be a resolution the system supports
22 | @property (nonatomic, unsafe_unretained) NSInteger height; //capture height; must be a resolution the system supports
23 | @property (nonatomic, unsafe_unretained) NSInteger bitrate; //freely configurable
24 | @property (nonatomic, unsafe_unretained) NSInteger fps; //freely configurable
25 | @property (nonatomic, unsafe_unretained) NSInteger dataFormat; //software encoding currently supports only X264_CSP_NV12; not needed for hardware encoding
26 | @property (nonatomic, unsafe_unretained) NSInteger videoMaxKeyFrameInterval; //key frame interval
27 | @property (nonatomic, unsafe_unretained) UIInterfaceOrientation orientation; //push stream orientation
28 | @property (nonatomic, readonly, unsafe_unretained) NSInteger pushStreamWidth;
29 | @property (nonatomic, readonly, unsafe_unretained) NSInteger pushStreamHeight;
30 |
31 | -(BOOL)shouldRotate;
32 | @property (nonatomic, readonly, unsafe_unretained) aw_x264_config x264Config;
33 | @end
34 |
--------------------------------------------------------------------------------
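A sketch showing one combination of values that respects the constraints noted in the comments above; the specific numbers are illustrative.

#import "AWAVConfig.h"

AWAudioConfig *audioConfig = [[AWAudioConfig alloc] init];
audioConfig.bitrate = 100000;       // freely configurable
audioConfig.channelCount = 1;       // 1 or 2
audioConfig.sampleRate = 44100;     // 44100, 22050, 11025 or 5500
audioConfig.sampleSize = 16;        // 16 or 8

AWVideoConfig *videoConfig = [[AWVideoConfig alloc] init];
videoConfig.width = 720;            // a capture resolution the device supports
videoConfig.height = 1280;
videoConfig.bitrate = 1000000;      // freely configurable
videoConfig.fps = 20;               // freely configurable
videoConfig.videoMaxKeyFrameInterval = 20;                 // one key frame every 20 frames
videoConfig.orientation = UIInterfaceOrientationPortrait;  // push in portrait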
/AWLive/PushStream/Capture/AWFileManager.h:
--------------------------------------------------------------------------------
1 | //
2 | // AWFileManager.h
3 | // AWLive
4 | //
5 | // Created by pengchao on 2022/6/13.
6 | //
7 |
8 | #import <Foundation/Foundation.h>
9 |
10 | NS_ASSUME_NONNULL_BEGIN
11 | typedef enum : NSUInteger {
12 | MEDIA_TYPE_UNKNOW,
13 | MEDIA_TYPE_PCM,
14 | MEDIA_TYPE_AAC,
15 | MEDIA_TYPE_H264,
16 | MEDIA_TYPE_FLV,
17 | } MEDIA_TYPE;
18 |
19 | @interface AWFileManager : NSObject
20 | @property (unsafe_unretained, readonly) FILE *audio_file;
21 | @property (unsafe_unretained, readonly) FILE *video_file;
22 | @property (unsafe_unretained, readonly) FILE *pcm_file;
23 | @property (unsafe_unretained, readonly) FILE *flv_file;
24 |
25 | @property (nonatomic, assign) NSInteger recoderTimerNum; ///capture duration (in seconds)
26 |
27 |
28 | - (NSString *)createFileWithMediaType:(MEDIA_TYPE)mediaType;
29 |
30 | - (NSString *)createFileWithFileName:(NSString *)fileName;
31 |
32 | - (BOOL)createAndOpenFileWithMediaType:(MEDIA_TYPE)mediaType;
33 |
34 | - (BOOL)clearCacheWithMediaType:(MEDIA_TYPE)type;
35 |
36 | - (NSString *)doucumentPath;
37 |
38 | - (NSString *)createDirWithDirName:(NSString *)dirName;
39 |
40 | - (BOOL)deleteFileWithFileName:(NSString *)fileName;
41 |
42 | - (BOOL)clearDocumentDir;
43 |
44 | - (void)startRecord;
45 |
46 | - (void)stopRecord;
47 |
48 | @end
49 |
50 | NS_ASSUME_NONNULL_END
51 |
--------------------------------------------------------------------------------
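A sketch of driving this interface when dumping a raw stream to disk for debugging; the flow is assumed from the method names above.

#import "AWFileManager.h"

AWFileManager *fileManager = [[AWFileManager alloc] init];
if ([fileManager createAndOpenFileWithMediaType:MEDIA_TYPE_H264]) {
    [fileManager startRecord];
    // ... write encoded H.264 data elsewhere while recording ...
    [fileManager stopRecord];
}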
/AWLive/PushStream/Capture/AWGPUImageAVCapture.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Captures audio/video with GPUImage, so the GPUImage beautify filter or other filter effects can be applied.
5 | */
6 |
7 | #import <Foundation/Foundation.h>
8 | #import "AWAVBaseCapture.h"
9 |
10 | @interface AWGPUImageAVCapture : AWAVBaseCapture
11 | @end
12 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Capture/AWSystemAVCapture.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Captures video with the system APIs only, without the beautify filter.
5 | Shows the full system audio/video capture flow in detail.
6 | */
7 |
8 | #import <Foundation/Foundation.h>
9 | #import "AWAVBaseCapture.h"
10 |
11 | @interface AWSystemAVCapture : AWAVBaseCapture
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Capture/AWSystemPreview.h:
--------------------------------------------------------------------------------
1 | //
2 | // CCCameraPreview.h
3 | // 001-Demo
4 | //
5 | // Created by pengchao on 2022/9/14.
6 | //
7 |
8 | #import <UIKit/UIKit.h>
9 | #import <AVFoundation/AVFoundation.h>
10 | NS_ASSUME_NONNULL_BEGIN
11 |
12 | @protocol AWSystemPreviewDelegate <NSObject>
13 | @optional
14 | - (void)singleTappedAtPoint:(CGPoint)point;
15 | - (void)doubleTappedAtPoint:(CGPoint)point;
16 | - (void)longPressAtPoint:(CGPoint)point;
17 | - (void)updateVideoZoomFactor:(CGFloat)factor;
18 |
19 | @end
20 |
21 |
22 | @interface AWSystemPreview : UIView
23 |
24 | @property (nonatomic, readonly) AVCaptureVideoPreviewLayer *videoPreviewLayer;
25 | @property (nonatomic, strong, setter=setSession:) AVCaptureSession *session;
26 | @property (weak, nonatomic) id<AWSystemPreviewDelegate> delegate;
27 | @end
28 |
29 | NS_ASSUME_NONNULL_END
30 |
--------------------------------------------------------------------------------
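A sketch of a view controller adopting AWSystemPreviewDelegate to react to the gesture callbacks above; the class name and the reactions are illustrative.

#import <UIKit/UIKit.h>
#import "AWSystemPreview.h"

@interface AWPreviewHostViewController : UIViewController <AWSystemPreviewDelegate>
@end

@implementation AWPreviewHostViewController
- (void)singleTappedAtPoint:(CGPoint)point {
    NSLog(@"tap at %@ - could drive tap-to-focus", NSStringFromCGPoint(point));
}
- (void)updateVideoZoomFactor:(CGFloat)factor {
    NSLog(@"pinch zoom factor %f - could adjust the capture device zoom", factor);
}
@end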
/AWLive/PushStream/Encoder/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/PushStream/Encoder/.DS_Store
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/AWEncoderManager.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Encoder manager; works like a factory and is responsible for creating all audio/video encoders.
5 | */
6 |
7 | #import <Foundation/Foundation.h>
8 | #import "AWVideoEncoder.h"
9 | #import "AWAudioEncoder.h"
10 |
11 | typedef enum : NSUInteger {
12 | AWVideoEncoderTypeNone,
13 | AWVideoEncoderTypeHWH264,
14 | AWVideoEncoderTypeSWX264,
15 | } AWVideoEncoderType;
16 |
17 | typedef enum : NSUInteger {
18 | AWAudioEncoderTypeNone,
19 | AWAudioEncoderTypeHWAACLC,
20 | AWAudioEncoderTypeSWFAAC,
21 | } AWAudioEncoderType;
22 |
23 | @class AWVideoEncoder;
24 | @class AWAudioEncoder;
25 | @class AWAudioConfig;
26 | @class AWVideoConfig;
27 |
28 | @interface AWEncoderManager : NSObject
29 | // Encoder types
30 | @property (nonatomic, unsafe_unretained) AWAudioEncoderType audioEncoderType;
31 | @property (nonatomic, unsafe_unretained) AWVideoEncoderType videoEncoderType;
32 |
33 | // Encoders
34 | @property (nonatomic, readonly, strong) AWVideoEncoder *videoEncoder;
35 | @property (nonatomic, readonly, strong) AWAudioEncoder *audioEncoder;
36 |
37 | // Timestamp
38 | @property (nonatomic, unsafe_unretained) uint32_t timestamp;
39 |
40 | // Open / close
41 | - (void)openWithAudioConfig:(AWAudioConfig *)audioConfig videoConfig:(AWVideoConfig *)videoConfig;
42 | - (void)close;
43 |
44 | - (BOOL)enableWriteAudioFile:(NSString *)fileName;
45 | - (BOOL)enableWriteVideoFile:(NSString *)fileName;
46 | - (BOOL)enableWriteFLVFile:(NSString *)fileName;
47 | @end
48 |
--------------------------------------------------------------------------------
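Based on the interface above, the manager is given encoder types, opened with the audio/video configs, and closed when the stream ends. A sketch, with the config objects assumed to be built as in AWAVConfig.h:

AWEncoderManager *encoderManager = [[AWEncoderManager alloc] init];
encoderManager.audioEncoderType = AWAudioEncoderTypeSWFAAC;   // faac software encoder
encoderManager.videoEncoderType = AWVideoEncoderTypeSWX264;   // x264 software encoder
[encoderManager openWithAudioConfig:audioConfig videoConfig:videoConfig];
// ... hand sample buffers to encoderManager.audioEncoder / encoderManager.videoEncoder ...
[encoderManager close];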
/AWLive/PushStream/Encoder/Base/AWAudioEncoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Audio encoder base class; only declares the interface plus a few shared data-conversion helpers.
5 | */
6 |
7 | #import "AWBaseEncoder.h"
8 |
9 | @interface AWAudioEncoder : AWBaseEncoder
10 |
11 | @property (nonatomic, copy) AWAudioConfig *audioConfig;
12 | /// Encode
13 | - (aw_flv_audio_tag *)encodePCMDataToFlvTag:(NSData *)pcmData;
14 |
15 | - (aw_flv_audio_tag *)encodeAudioSampleBufToFlvTag:(CMSampleBufferRef)audioSample;
16 | /// Create the AudioSpecificConfig tag
17 | - (aw_flv_audio_tag *)createAudioSpecificConfigFlvTag;
18 | /// Convert
19 | - (NSData *) convertAudioSmapleBufferToPcmData:(CMSampleBufferRef)audioSample;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/Base/AWAudioEncoder.m:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import "AWAudioEncoder.h"
4 |
5 | @implementation AWAudioEncoder
6 | - (aw_flv_audio_tag *) encodePCMDataToFlvTag:(NSData *)pcmData {
7 | return NULL;
8 | }
9 |
10 | - (aw_flv_audio_tag *) encodeAudioSampleBufToFlvTag:(CMSampleBufferRef)audioSample {
11 |
12 | return [self encodePCMDataToFlvTag:[self convertAudioSmapleBufferToPcmData:audioSample]];
13 | }
14 |
15 | - (aw_flv_audio_tag *)createAudioSpecificConfigFlvTag{
16 | return NULL;
17 | }
18 |
19 | - (NSData *) convertAudioSmapleBufferToPcmData:(CMSampleBufferRef)audioSample {
20 | // get the PCM data size
21 | NSInteger audioDataSize = CMSampleBufferGetTotalSampleSize(audioSample);
22 |
23 | // allocate a buffer
24 | int8_t *audio_data = aw_alloc((int32_t)audioDataSize);
25 |
26 | // get the CMBlockBufferRef
27 | // this structure holds the PCM data
28 | CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(audioSample);
29 | // copy the bytes straight into the buffer we allocated
30 | OSStatus status = CMBlockBufferCopyDataBytes(dataBuffer, 0, audioDataSize, audio_data);
31 | NSError *error = nil;
32 | if (status != kCMBlockBufferNoErr) {
33 | error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
34 | NSLog(@"Error: ACC encode get data point error: %@",error);
35 | return nil;
36 | }
37 | // return the data
38 | return [NSData dataWithBytesNoCopy:audio_data length:audioDataSize];
39 | }
40 |
41 | @end
42 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/Base/AWBaseEncoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Encoder base class; declares the shared interface.
5 | */
6 |
7 | #import
8 | #import "AWAVConfig.h"
9 | #include "aw_all.h"
10 |
11 | typedef enum : NSUInteger {
12 | AWEncoderErrorCodeVTSessionCreateFailed,
13 | AWEncoderErrorCodeVTSessionPrepareFailed,
14 | AWEncoderErrorCodeLockSampleBaseAddressFailed,
15 | AWEncoderErrorCodeEncodeVideoFrameFailed,
16 | AWEncoderErrorCodeEncodeCreateBlockBufFailed,
17 | AWEncoderErrorCodeEncodeCreateSampleBufFailed,
18 | AWEncoderErrorCodeEncodeGetSpsPpsFailed,
19 | AWEncoderErrorCodeEncodeGetH264DataFailed,
20 | AWEncoderErrorCodeCreateAudioConverterFailed,
21 | AWEncoderErrorCodeAudioConverterGetMaxFrameSizeFailed,
22 | AWEncoderErrorCodeAudioEncoderFailed,
23 | } AWEncoderErrorCode;
24 |
25 | @class AWEncoderManager;
26 | @interface AWBaseEncoder : NSObject
27 | @property (nonatomic, weak) AWEncoderManager *manager;
28 | // start
29 | - (void)open;
30 | // stop
31 | - (void)close;
32 | // error
33 | - (void)onErrorWithCode:(AWEncoderErrorCode) code des:(NSString *) des;
34 |
35 |
36 |
37 | @end
38 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/Base/AWBaseEncoder.m:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import "AWBaseEncoder.h"
4 |
5 | @implementation AWBaseEncoder
6 |
7 | - (void)open{}
8 |
9 | - (void)close{}
10 |
11 | - (void) onErrorWithCode:(AWEncoderErrorCode)code des:(NSString *)des {
12 | aw_log("[ERROR] encoder error code:%ld des:%s", (unsigned long)code, des.UTF8String);
13 | }
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/Base/AWVideoEncoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Video encoder base class; only declares the interface plus a few shared data-conversion helpers.
5 | */
6 |
7 | #import "AWBaseEncoder.h"
8 |
9 | @interface AWVideoEncoder : AWBaseEncoder
10 |
11 | /// Temporary addition
12 | @property (nonatomic, copy) NSMutableData *spsData;
13 | @property (nonatomic, copy) NSMutableData *ppsData;
14 |
15 | @property (nonatomic, copy) AWVideoConfig *videoConfig;
16 |
17 | /// Rotate
18 | - (NSData *)rotateNV12Data:(NSData *)nv12Data;
19 |
20 | /// Encode
21 | - (aw_flv_video_tag *)encodeYUVWithPixelBuffer:(CVPixelBufferRef)pixelBuffer;
22 |
23 | - (aw_flv_video_tag *)encodeVideoSampleBufToFlvTag:(CMSampleBufferRef)videoSample;
24 |
25 | /// Per the FLV/H.264/AAC specs, provides the tag that must be sent before the first frame.
26 | /// Create the SPS/PPS tag
27 | - (aw_flv_video_tag *)createSpsPpsFlvTag;
28 |
29 | /// Convert
30 | - (NSData *)convertVideoSmapleBufferToYuvData:(CVPixelBufferRef)pixelBuffer;
31 |
32 |
33 | @end
34 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/HW/AWHWAACEncoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | AAC hardware encoder
5 | */
6 |
7 | #import "AWAudioEncoder.h"
8 |
9 | @interface AWHWAACEncoder : AWAudioEncoder
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/HW/AWHWH264Encoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | H.264 hardware encoder
5 | */
6 |
7 | #import "AWVideoEncoder.h"
8 |
9 | #define BGMLIVE_TIME_NOW (CACurrentMediaTime()*1000000)
10 | @interface AWHWH264Encoder : AWVideoEncoder
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/SW/AWSWFaacEncoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | AAC software encoder (faac)
5 | */
6 |
7 | #import "AWAudioEncoder.h"
8 |
9 | @interface AWSWFaacEncoder : AWAudioEncoder
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/SW/AWSWFaacEncoder.m:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import "AWSWFaacEncoder.h"
4 | #import "AWEncoderManager.h"
5 |
6 | @implementation AWSWFaacEncoder
7 |
8 | - (aw_flv_audio_tag *) encodePCMDataToFlvTag:(NSData *)pcmData {
9 | self.manager.timestamp += aw_sw_faac_encoder_max_input_sample_count() * 1000 / self.audioConfig.sampleRate;
10 | return aw_sw_encoder_encode_faac_data((int8_t *)pcmData.bytes, pcmData.length, self.manager.timestamp);
11 | }
12 |
13 | - (aw_flv_audio_tag *)createAudioSpecificConfigFlvTag {
14 | return aw_sw_encoder_create_faac_specific_config_tag();
15 | }
16 |
17 | - (void)open {
18 | aw_faac_config faac_config = self.audioConfig.faacConfig;
19 | aw_sw_encoder_open_faac_encoder(&faac_config);
20 | }
21 |
22 | - (void)close {
23 | aw_sw_encoder_close_faac_encoder();
24 | }
25 | @end
26 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/SW/AWSWX264Encoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | H.264 software encoder (x264)
5 | */
6 |
7 | #import "AWVideoEncoder.h"
8 |
9 | @interface AWSWX264Encoder : AWVideoEncoder
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/AWLive/PushStream/Encoder/SW/AWSWX264Encoder.m:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import "AWSWX264Encoder.h"
4 | #import "AWEncoderManager.h"
5 |
6 | @implementation AWSWX264Encoder
7 | /// Deprecated.
8 | - (aw_flv_video_tag *)encodeYUVWithPixelBuffer:(CVPixelBufferRef)pixelBuffer {
9 | NSData *yuvData = [self convertVideoSmapleBufferToYuvData:pixelBuffer];
10 | long y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
11 | return aw_sw_encoder_encode_x264_data((int8_t *)yuvData.bytes, yuvData.length, y_stride, self.manager.timestamp + 1);
12 | }
13 |
14 | - (aw_flv_video_tag *)createSpsPpsFlvTag {
15 | return aw_sw_encoder_create_x264_sps_pps_tag();
16 | }
17 |
18 | - (void)open {
19 | aw_x264_config x264_config = self.videoConfig.x264Config;
20 | aw_sw_encoder_open_x264_encoder(&x264_config);
21 | }
22 |
23 | - (void)close {
24 | aw_sw_encoder_close_x264_encoder();
25 | }
26 |
27 | @end
28 |
--------------------------------------------------------------------------------
/AWLive/PushStream/GpuImage/AWGPUImageVideoCamera.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | GPUImage camera subclass that also hands out the audio sample buffers.
5 | */
6 |
7 | #import
8 | #import
9 |
10 | @protocol AWGPUImageVideoCameraDelegate <NSObject>
11 |
12 | - (void)processAudioSample:(CMSampleBufferRef)sampleBuffer;
13 |
14 | @end
15 |
16 | @interface AWGPUImageVideoCamera : GPUImageVideoCamera
17 |
18 | @property (nonatomic, weak) id<AWGPUImageVideoCameraDelegate> awAudioDelegate;
19 |
20 | - (void)setCaptureSessionPreset:(NSString *)captureSessionPreset;
21 |
22 | @end
23 |
--------------------------------------------------------------------------------
/AWLive/PushStream/GpuImage/AWGPUImageVideoCamera.m:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import "AWGPUImageVideoCamera.h"
4 |
5 | @implementation AWGPUImageVideoCamera
6 |
7 | - (void)processAudioSampleBuffer:(CMSampleBufferRef)sampleBuffer {
8 | [super processAudioSampleBuffer:sampleBuffer];
9 | [self.awAudioDelegate processAudioSample:sampleBuffer];
10 | }
11 |
12 | - (void)setCaptureSessionPreset:(NSString *)captureSessionPreset {
13 | if (!_captureSession || ![_captureSession canSetSessionPreset:captureSessionPreset]) {
14 | @throw [NSException exceptionWithName:@"Not supported captureSessionPreset" reason:[NSString stringWithFormat:@"captureSessionPreset is [%@]", captureSessionPreset] userInfo:nil];
15 | return;
16 | }
17 | [super setCaptureSessionPreset:captureSessionPreset];
18 | }
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
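A sketch of wiring the audio callback: the owner creates the camera with the standard GPUImageVideoCamera initializer and adopts AWGPUImageVideoCameraDelegate so it receives each audio CMSampleBufferRef. The preset and camera position here are illustrative.

AWGPUImageVideoCamera *camera =
    [[AWGPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset1280x720
                                           cameraPosition:AVCaptureDevicePositionBack];
camera.awAudioDelegate = self;   // self implements -processAudioSample: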
/AWLive/PushStream/GpuImage/GPUImageBeautifyFilter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageBeautifyFilter.h
3 | // BeautifyFaceDemo
4 | //
5 | // Created by guikz on 16/4/28.
6 | // Copyright © 2016 guikz. All rights reserved.
7 | //
8 |
9 | /*
10 | * GPUImage beautify (face-smoothing) filter
11 | * Based on the project linked below; thanks for sharing:
12 | * https://github.com/Guikunzhi/BeautifyFaceDemo
13 | */
14 |
15 | #import
16 |
17 | @class GPUImageCombinationFilter;
18 |
19 | @interface GPUImageBeautifyFilter : GPUImageFilterGroup {
20 | GPUImageBilateralFilter *bilateralFilter;
21 | GPUImageCannyEdgeDetectionFilter *cannyEdgeFilter;
22 | GPUImageCombinationFilter *combinationFilter;
23 | GPUImageHSBFilter *hsbFilter;
24 | }
25 |
26 | @end
27 |
--------------------------------------------------------------------------------
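A sketch of the usual GPUImage target chain with the beautify filter inserted between the camera and the preview; the videoCamera (GPUImageVideoCamera) and gpuImageView (GPUImageView) are assumed to be set up elsewhere.

GPUImageBeautifyFilter *beautifyFilter = [[GPUImageBeautifyFilter alloc] init];
[videoCamera addTarget:beautifyFilter];      // camera -> beautify
[beautifyFilter addTarget:gpuImageView];     // beautify -> on-screen GPUImageView
[videoCamera startCameraCapture];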
/AWLive/Test/AWLiveCapture.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #import <Foundation/Foundation.h>
4 | #import <AVFoundation/AVFoundation.h>
5 | #import "ViewController.h"
6 | #import "AWFileManager.h"
7 |
8 | @interface AWLiveCapture : NSObject
9 |
10 | @property (nonatomic,strong) AWFileManager *fileManager;
11 |
12 | - (instancetype)initWithViewController:(ViewController *)viewCtl;
13 |
14 | - (void)onLayout;
15 |
16 | - (void)updatePresent:(AVCaptureSessionPreset)present;
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
/AWLive/ViewController.h:
--------------------------------------------------------------------------------
1 | //
2 | // ViewController.h
3 | // AWLive
4 | //
5 | // Created by xxx on 5/11/16.
6 | //
7 | //
8 |
9 | // Notes:
10 | //1. Portrait orientation only
11 | //2. Deployment target 8.0
12 | //3. Disable bitcode
13 | //4. Set the header search paths & library search paths
14 | //5. Add GPUImage as an embedded library
15 | //6. IPv6
16 | //7. Info.plist: NSCameraUsageDescription / NSMicrophoneUsageDescription / (the IPv6 or HTTPS-related key)
17 |
18 | #import <UIKit/UIKit.h>
19 |
20 | @interface ViewController : UIViewController
21 |
22 |
23 | @end
24 |
25 |
--------------------------------------------------------------------------------
/AWLive/camera_switch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/AWLive/camera_switch.png
--------------------------------------------------------------------------------
/AWLive/main.m:
--------------------------------------------------------------------------------
1 | //
2 | // main.m
3 | // AWLive
4 | //
5 | // Created by xxx on 5/11/16.
6 | //
7 | //
8 |
9 | #import <UIKit/UIKit.h>
10 | #import "AppDelegate.h"
11 |
12 | int main(int argc, char * argv[]) {
13 | @autoreleasepool {
14 | return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class]));
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/GPUImage/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/.DS_Store
--------------------------------------------------------------------------------
/GPUImage/GPUImage.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 |
2 |
4 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/GPUImage/GPUImage.xcodeproj/xcuserdata/pengchao.xcuserdatad/xcschemes/xcschememanagement.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | SchemeUserState
6 |
7 | Documentation.xcscheme_^#shared#^_
8 |
9 | orderHint
10 | 0
11 |
12 | GPUImage.xcscheme_^#shared#^_
13 |
14 | orderHint
15 | 1
16 |
17 | GPUImageFramework.xcscheme_^#shared#^_
18 |
19 | orderHint
20 | 2
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/GPUImage/Resources/lookup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Resources/lookup.png
--------------------------------------------------------------------------------
/GPUImage/Resources/lookup_amatorka.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Resources/lookup_amatorka.png
--------------------------------------------------------------------------------
/GPUImage/Resources/lookup_miss_etikate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Resources/lookup_miss_etikate.png
--------------------------------------------------------------------------------
/GPUImage/Resources/lookup_soft_elegance_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Resources/lookup_soft_elegance_1.png
--------------------------------------------------------------------------------
/GPUImage/Resources/lookup_soft_elegance_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Resources/lookup_soft_elegance_2.png
--------------------------------------------------------------------------------
/GPUImage/Source/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/GPUImage/Source/.DS_Store
--------------------------------------------------------------------------------
/GPUImage/Source/GLProgram.h:
--------------------------------------------------------------------------------
1 | // This is Jeff LaMarche's GLProgram OpenGL shader wrapper class from his OpenGL ES 2.0 book.
2 | // A description of this can be found at his page on the topic:
3 | // http://iphonedevelopment.blogspot.com/2010/11/opengl-es-20-for-ios-chapter-4.html
4 | // I've extended this to be able to take programs as NSStrings in addition to files, for baked-in shaders
5 |
6 | #import <Foundation/Foundation.h>
7 |
8 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
9 | #import <OpenGLES/ES2/gl.h>
10 | #import <OpenGLES/ES2/glext.h>
11 | #else
12 | #import <OpenGL/OpenGL.h>
13 | #import <OpenGL/gl.h>
14 | #endif
15 |
16 | @interface GLProgram : NSObject
17 | {
18 | NSMutableArray *attributes;
19 | NSMutableArray *uniforms;
20 | GLuint program,
21 | vertShader,
22 | fragShader;
23 | }
24 |
25 | @property(readwrite, nonatomic) BOOL initialized;
26 | @property(readwrite, copy, nonatomic) NSString *vertexShaderLog;
27 | @property(readwrite, copy, nonatomic) NSString *fragmentShaderLog;
28 | @property(readwrite, copy, nonatomic) NSString *programLog;
29 |
30 | - (id)initWithVertexShaderString:(NSString *)vShaderString
31 | fragmentShaderString:(NSString *)fShaderString;
32 | - (id)initWithVertexShaderString:(NSString *)vShaderString
33 | fragmentShaderFilename:(NSString *)fShaderFilename;
34 | - (id)initWithVertexShaderFilename:(NSString *)vShaderFilename
35 | fragmentShaderFilename:(NSString *)fShaderFilename;
36 | - (void)addAttribute:(NSString *)attributeName;
37 | - (GLuint)attributeIndex:(NSString *)attributeName;
38 | - (GLuint)uniformIndex:(NSString *)uniformName;
39 | - (BOOL)link;
40 | - (void)use;
41 | - (void)validate;
42 | @end
43 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImage3x3ConvolutionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | /** Runs a 3x3 convolution kernel against the image
4 | */
5 | @interface GPUImage3x3ConvolutionFilter : GPUImage3x3TextureSamplingFilter
6 | {
7 | GLint convolutionMatrixUniform;
8 | }
9 |
10 | /** Convolution kernel to run against the image
11 |
12 | The convolution kernel is a 3x3 matrix of values to apply to the pixel and its 8 surrounding pixels.
13 | The matrix is specified in row-major order, with the top left pixel being one.one and the bottom right three.three
14 | If the values in the matrix don't add up to 1.0, the image could be brightened or darkened.
15 | */
16 | @property(readwrite, nonatomic) GPUMatrix3x3 convolutionKernel;
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
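For example, a basic sharpen kernel; the entries sum to 1.0, so overall brightness is preserved, as the note above explains. A minimal sketch:

GPUImage3x3ConvolutionFilter *convolutionFilter = [[GPUImage3x3ConvolutionFilter alloc] init];
convolutionFilter.convolutionKernel = (GPUMatrix3x3){
    { 0.0f, -1.0f,  0.0f},   // row one
    {-1.0f,  5.0f, -1.0f},   // row two
    { 0.0f, -1.0f,  0.0f}    // row three
};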
/GPUImage/Source/GPUImage3x3TextureSamplingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | extern NSString *const kGPUImageNearbyTexelSamplingVertexShaderString;
4 |
5 | @interface GPUImage3x3TextureSamplingFilter : GPUImageFilter
6 | {
7 | GLint texelWidthUniform, texelHeightUniform;
8 |
9 | CGFloat texelWidth, texelHeight;
10 | BOOL hasOverriddenImageSizeFactor;
11 | }
12 |
13 | // The texel width and height determines how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects.
14 | @property(readwrite, nonatomic) CGFloat texelWidth;
15 | @property(readwrite, nonatomic) CGFloat texelHeight;
16 |
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAdaptiveThresholdFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @interface GPUImageAdaptiveThresholdFilter : GPUImageFilterGroup
4 |
5 | /** A multiplier for the background averaging blur radius in pixels, with a default of 4
6 | */
7 | @property(readwrite, nonatomic) CGFloat blurRadiusInPixels;
8 |
9 | @end
10 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAddBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageAddBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAlphaBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageAlphaBlendFilter : GPUImageTwoInputFilter
4 | {
5 | GLint mixUniform;
6 | }
7 |
8 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 1.0 as the normal level
9 | @property(readwrite, nonatomic) CGFloat mix;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
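A sketch of a 50/50 cross-fade; the two upstream sources (any GPUImageOutput instances, assumed here) are attached to texture locations 0 and 1, and mix weights them as described above.

GPUImageAlphaBlendFilter *blendFilter = [[GPUImageAlphaBlendFilter alloc] init];
blendFilter.mix = 0.5;                                           // 0.0 = only image 1, 1.0 = only image 2
[baseImageSource addTarget:blendFilter atTextureLocation:0];     // image 1
[overlayImageSource addTarget:blendFilter atTextureLocation:1];  // image 2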
/GPUImage/Source/GPUImageAmatorkaFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImagePicture;
4 |
5 | /** A photo filter based on Photoshop action by Amatorka
6 | http://amatorka.deviantart.com/art/Amatorka-Action-2-121069631
7 | */
8 |
9 | // Note: If you want to use this effect you have to add lookup_amatorka.png
10 | // from Resources folder to your application bundle.
11 |
12 | @interface GPUImageAmatorkaFilter : GPUImageFilterGroup
13 | {
14 | GPUImagePicture *lookupImageSource;
15 | }
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAmatorkaFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageAmatorkaFilter.h"
2 | #import "GPUImagePicture.h"
3 | #import "GPUImageLookupFilter.h"
4 |
5 | @implementation GPUImageAmatorkaFilter
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [super init]))
10 | {
11 | return nil;
12 | }
13 |
14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
15 | UIImage *image = [UIImage imageNamed:@"lookup_amatorka.png"];
16 | #else
17 | NSImage *image = [NSImage imageNamed:@"lookup_amatorka.png"];
18 | #endif
19 |
20 | NSAssert(image, @"To use GPUImageAmatorkaFilter you need to add lookup_amatorka.png from GPUImage/framework/Resources to your application bundle.");
21 |
22 | lookupImageSource = [[GPUImagePicture alloc] initWithImage:image];
23 | GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init];
24 | [self addFilter:lookupFilter];
25 |
26 | [lookupImageSource addTarget:lookupFilter atTextureLocation:1];
27 | [lookupImageSource processImage];
28 |
29 | self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil];
30 | self.terminalFilter = lookupFilter;
31 |
32 | return self;
33 | }
34 |
35 | #pragma mark -
36 | #pragma mark Accessors
37 |
38 | @end
39 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAverageColor.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | extern NSString *const kGPUImageColorAveragingVertexShaderString;
4 |
5 | @interface GPUImageAverageColor : GPUImageFilter
6 | {
7 | GLint texelWidthUniform, texelHeightUniform;
8 |
9 | NSUInteger numberOfStages;
10 |
11 | GLubyte *rawImagePixels;
12 | CGSize finalStageSize;
13 | }
14 |
15 | // This block is called on the completion of color averaging for a frame
16 | @property(nonatomic, copy) void(^colorAverageProcessingFinishedBlock)(CGFloat redComponent, CGFloat greenComponent, CGFloat blueComponent, CGFloat alphaComponent, CMTime frameTime);
17 |
18 | - (void)extractAverageColorAtFrameTime:(CMTime)frameTime;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
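A sketch of reading back the per-frame average via the completion block declared above:

GPUImageAverageColor *averageColor = [[GPUImageAverageColor alloc] init];
[averageColor setColorAverageProcessingFinishedBlock:^(CGFloat red, CGFloat green, CGFloat blue, CGFloat alpha, CMTime frameTime) {
    NSLog(@"average color: r=%.3f g=%.3f b=%.3f a=%.3f", red, green, blue, alpha);
}];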
/GPUImage/Source/GPUImageAverageLuminanceThresholdFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @interface GPUImageAverageLuminanceThresholdFilter : GPUImageFilterGroup
4 |
5 | // This is multiplied by the continually calculated average image luminosity to arrive at the final threshold. Default is 1.0.
6 | @property(readwrite, nonatomic) CGFloat thresholdMultiplier;
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageAverageLuminanceThresholdFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageAverageLuminanceThresholdFilter.h"
2 | #import "GPUImageLuminosity.h"
3 | #import "GPUImageLuminanceThresholdFilter.h"
4 |
5 | @interface GPUImageAverageLuminanceThresholdFilter()
6 | {
7 | GPUImageLuminosity *luminosityFilter;
8 | GPUImageLuminanceThresholdFilter *luminanceThresholdFilter;
9 | }
10 | @end
11 |
12 | @implementation GPUImageAverageLuminanceThresholdFilter
13 |
14 | @synthesize thresholdMultiplier = _thresholdMultiplier;
15 |
16 | #pragma mark -
17 | #pragma mark Initialization and teardown
18 |
19 | - (id)init;
20 | {
21 | if (!(self = [super init]))
22 | {
23 | return nil;
24 | }
25 |
26 | self.thresholdMultiplier = 1.0;
27 |
28 | luminosityFilter = [[GPUImageLuminosity alloc] init];
29 | [self addFilter:luminosityFilter];
30 |
31 | luminanceThresholdFilter = [[GPUImageLuminanceThresholdFilter alloc] init];
32 | [self addFilter:luminanceThresholdFilter];
33 |
34 | __unsafe_unretained GPUImageAverageLuminanceThresholdFilter *weakSelf = self;
35 | __unsafe_unretained GPUImageLuminanceThresholdFilter *weakThreshold = luminanceThresholdFilter;
36 |
37 | [luminosityFilter setLuminosityProcessingFinishedBlock:^(CGFloat luminosity, CMTime frameTime) {
38 | weakThreshold.threshold = luminosity * weakSelf.thresholdMultiplier;
39 | }];
40 |
41 | self.initialFilters = [NSArray arrayWithObjects:luminosityFilter, luminanceThresholdFilter, nil];
42 | self.terminalFilter = luminanceThresholdFilter;
43 |
44 | return self;
45 | }
46 |
47 | @end
48 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBilateralFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageGaussianBlurFilter.h"
2 |
3 | @interface GPUImageBilateralFilter : GPUImageGaussianBlurFilter
4 | {
5 | CGFloat firstDistanceNormalizationFactorUniform;
6 | CGFloat secondDistanceNormalizationFactorUniform;
7 | }
8 | // A normalization factor for the distance between central color and sample color.
9 | @property(nonatomic, readwrite) CGFloat distanceNormalizationFactor;
10 | @end
11 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBoxBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageGaussianBlurFilter.h"
2 |
3 | /** A hardware-accelerated box blur of an image
4 | */
5 | @interface GPUImageBoxBlurFilter : GPUImageGaussianBlurFilter
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBrightnessFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageBrightnessFilter : GPUImageFilter
4 | {
5 | GLint brightnessUniform;
6 | }
7 |
8 | // Brightness ranges from -1.0 to 1.0, with 0.0 as the normal level
9 | @property(readwrite, nonatomic) CGFloat brightness;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBrightnessFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageBrightnessFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 | uniform lowp float brightness;
10 |
11 | void main()
12 | {
13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
14 |
15 | gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w);
16 | }
17 | );
18 | #else
19 | NSString *const kGPUImageBrightnessFragmentShaderString = SHADER_STRING
20 | (
21 | varying vec2 textureCoordinate;
22 |
23 | uniform sampler2D inputImageTexture;
24 | uniform float brightness;
25 |
26 | void main()
27 | {
28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
29 |
30 | gl_FragColor = vec4((textureColor.rgb + vec3(brightness)), textureColor.w);
31 | }
32 | );
33 | #endif
34 |
35 | @implementation GPUImageBrightnessFilter
36 |
37 | @synthesize brightness = _brightness;
38 |
39 | #pragma mark -
40 | #pragma mark Initialization and teardown
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageBrightnessFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | brightnessUniform = [filterProgram uniformIndex:@"brightness"];
50 | self.brightness = 0.0;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setBrightness:(CGFloat)newValue;
59 | {
60 | _brightness = newValue;
61 |
62 | [self setFloat:_brightness forUniform:brightnessUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
67 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBuffer.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageBuffer : GPUImageFilter
4 | {
5 | NSMutableArray *bufferedFramebuffers;
6 | }
7 |
8 | @property(readwrite, nonatomic) NSUInteger bufferSize;
9 |
10 | @end
11 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageBulgeDistortionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /// Creates a bulge distortion on the image
4 | @interface GPUImageBulgeDistortionFilter : GPUImageFilter
5 | {
6 | GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform;
7 | }
8 |
9 | /// The center about which to apply the distortion, with a default of (0.5, 0.5)
10 | @property(readwrite, nonatomic) CGPoint center;
11 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25
12 | @property(readwrite, nonatomic) CGFloat radius;
13 | /// The amount of distortion to apply, from -1.0 to 1.0, with a default of 0.5
14 | @property(readwrite, nonatomic) CGFloat scale;
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageCGAColorspaceFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageCGAColorspaceFilter : GPUImageFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageChromaKeyBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | /** Selectively replaces a color in the first image with the second image
4 | */
5 | @interface GPUImageChromaKeyBlendFilter : GPUImageTwoInputFilter
6 | {
7 | GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform;
8 | }
9 |
10 | /** The threshold sensitivity controls how similar pixels need to be colored to be replaced
11 |
12 | The default value is 0.3
13 | */
14 | @property(readwrite, nonatomic) CGFloat thresholdSensitivity;
15 |
16 | /** The degree of smoothing controls how gradually similar colors are replaced in the image
17 |
18 | The default value is 0.1
19 | */
20 | @property(readwrite, nonatomic) CGFloat smoothing;
21 |
22 | /** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0).
23 |
24 | The default is green: (0.0, 1.0, 0.0).
25 |
26 | @param redComponent Red component of color to be replaced
27 | @param greenComponent Green component of color to be replaced
28 | @param blueComponent Blue component of color to be replaced
29 | */
30 | - (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
31 |
32 | @end
33 |
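Usage sketch (not part of the file above): keying out green from a foreground image and blending a background behind it; foregroundPicture and backgroundPicture are assumed GPUImagePicture instances.

GPUImageChromaKeyBlendFilter *chromaKeyBlend = [[GPUImageChromaKeyBlendFilter alloc] init];
[chromaKeyBlend setColorToReplaceRed:0.0 green:1.0 blue:0.0];   // key on pure green (the default)
chromaKeyBlend.thresholdSensitivity = 0.4;
chromaKeyBlend.smoothing = 0.1;
[foregroundPicture addTarget:chromaKeyBlend];   // first input: image containing the keyed color
[backgroundPicture addTarget:chromaKeyBlend];   // second input: replacement background
[foregroundPicture processImage];
[backgroundPicture processImage];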
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageChromaKeyFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageChromaKeyFilter : GPUImageFilter
4 | {
5 | GLint colorToReplaceUniform, thresholdSensitivityUniform, smoothingUniform;
6 | }
7 |
8 | /** The threshold sensitivity controls how similar pixels need to be colored to be replaced
9 |
10 | The default value is 0.3
11 | */
12 | @property(readwrite, nonatomic) CGFloat thresholdSensitivity;
13 |
14 | /** The degree of smoothing controls how gradually similar colors are replaced in the image
15 |
16 | The default value is 0.1
17 | */
18 | @property(readwrite, nonatomic) CGFloat smoothing;
19 |
20 | /** The color to be replaced is specified using individual red, green, and blue components (normalized to 1.0).
21 |
22 | The default is green: (0.0, 1.0, 0.0).
23 |
24 | @param redComponent Red component of color to be replaced
25 | @param greenComponent Green component of color to be replaced
26 | @param blueComponent Blue component of color to be replaced
27 | */
28 | - (void)setColorToReplaceRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
29 |
30 | @end
31 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageClosingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageErosionFilter;
4 | @class GPUImageDilationFilter;
5 |
6 | // A filter that first performs a dilation on the red channel of an image, followed by an erosion of the same radius.
7 | // This helps to filter out smaller dark elements.
8 |
9 | @interface GPUImageClosingFilter : GPUImageFilterGroup
10 | {
11 | GPUImageErosionFilter *erosionFilter;
12 | GPUImageDilationFilter *dilationFilter;
13 | }
14 |
15 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing;
16 |
17 | - (id)initWithRadius:(NSUInteger)radius;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageClosingFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageClosingFilter.h"
2 | #import "GPUImageErosionFilter.h"
3 | #import "GPUImageDilationFilter.h"
4 |
5 | @implementation GPUImageClosingFilter
6 |
7 | @synthesize verticalTexelSpacing = _verticalTexelSpacing;
8 | @synthesize horizontalTexelSpacing = _horizontalTexelSpacing;
9 |
10 | - (id)init;
11 | {
12 | if (!(self = [self initWithRadius:1]))
13 | {
14 | return nil;
15 | }
16 |
17 | return self;
18 | }
19 |
20 | - (id)initWithRadius:(NSUInteger)radius;
21 | {
22 | if (!(self = [super init]))
23 | {
24 | return nil;
25 | }
26 |
27 | // First pass: dilation
28 | dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius];
29 | [self addFilter:dilationFilter];
30 |
31 | // Second pass: erosion
32 | erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius];
33 | [self addFilter:erosionFilter];
34 |
35 | [dilationFilter addTarget:erosionFilter];
36 |
37 | self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil];
38 | self.terminalFilter = erosionFilter;
39 |
40 | return self;
41 | }
42 |
43 | - (void)setVerticalTexelSpacing:(CGFloat)newValue;
44 | {
45 | _verticalTexelSpacing = newValue;
46 | erosionFilter.verticalTexelSpacing = newValue;
47 | dilationFilter.verticalTexelSpacing = newValue;
48 | }
49 |
50 | - (void)setHorizontalTexelSpacing:(CGFloat)newValue;
51 | {
52 | _horizontalTexelSpacing = newValue;
53 | erosionFilter.horizontalTexelSpacing = newValue;
54 | dilationFilter.horizontalTexelSpacing = newValue;
55 | }
56 |
57 | @end
58 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageColorBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorBurnBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | /** Applies a color burn blend of two images
4 | */
5 | @interface GPUImageColorBurnBlendFilter : GPUImageTwoInputFilter
6 | {
7 | }
8 |
9 | @end
10 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorBurnBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageColorBurnBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 | mediump vec4 whiteColor = vec4(1.0);
17 | gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2;
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageColorBurnBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
33 | vec4 whiteColor = vec4(1.0);
34 | gl_FragColor = whiteColor - (whiteColor - textureColor) / textureColor2;
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageColorBurnBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageColorBurnBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorConversion.h:
--------------------------------------------------------------------------------
1 | #ifndef GPUImageColorConversion_h
2 | #define GPUImageColorConversion_h
3 |
4 | extern GLfloat *kColorConversion601;
5 | extern GLfloat *kColorConversion601FullRange;
6 | extern GLfloat *kColorConversion709;
7 | extern NSString *const kGPUImageYUVVideoRangeConversionForRGFragmentShaderString;
8 | extern NSString *const kGPUImageYUVFullRangeConversionForLAFragmentShaderString;
9 | extern NSString *const kGPUImageYUVVideoRangeConversionForLAFragmentShaderString;
10 |
11 |
12 | #endif /* GPUImageColorConversion_h */
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorDodgeBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | /** Applies a color dodge blend of two images
4 | */
5 | @interface GPUImageColorDodgeBlendFilter : GPUImageTwoInputFilter
6 | {
7 | }
8 |
9 | @end
10 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorInvertFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageColorInvertFilter : GPUImageFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorInvertFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageColorInvertFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 |
10 | void main()
11 | {
12 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
13 |
14 | gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w);
15 | }
16 | );
17 | #else
18 | NSString *const kGPUImageInvertFragmentShaderString = SHADER_STRING
19 | (
20 | varying vec2 textureCoordinate;
21 |
22 | uniform sampler2D inputImageTexture;
23 |
24 | void main()
25 | {
26 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
27 |
28 | gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w);
29 | }
30 | );
31 | #endif
32 |
33 | @implementation GPUImageColorInvertFilter
34 |
35 | - (id)init;
36 | {
37 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageInvertFragmentShaderString]))
38 | {
39 | return nil;
40 | }
41 |
42 | return self;
43 | }
44 |
45 | @end
46 |
47 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorLocalBinaryPatternFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageColorLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorMatrixFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Transforms the colors of an image by applying a matrix to them
4 | */
5 | @interface GPUImageColorMatrixFilter : GPUImageFilter
6 | {
7 | GLint colorMatrixUniform;
8 | GLint intensityUniform;
9 | }
10 |
11 | /** A 4x4 matrix used to transform each color in an image
12 | */
13 | @property(readwrite, nonatomic) GPUMatrix4x4 colorMatrix;
14 |
15 | /** The degree to which the new transformed color replaces the original color for each pixel
16 | */
17 | @property(readwrite, nonatomic) CGFloat intensity;
18 |
19 | @end
20 |
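Usage sketch (not part of the file above): a matrix that swaps the red and blue channels, assuming the GPUMatrix4x4/GPUVector4 row layout declared in GPUImageFilter.h and that each row produces one output channel.

GPUImageColorMatrixFilter *colorMatrixFilter = [[GPUImageColorMatrixFilter alloc] init];
GPUMatrix4x4 swapRedAndBlue = {
    {0.0, 0.0, 1.0, 0.0},   // output red taken from input blue
    {0.0, 1.0, 0.0, 0.0},   // green unchanged
    {1.0, 0.0, 0.0, 0.0},   // output blue taken from input red
    {0.0, 0.0, 0.0, 1.0}    // alpha unchanged
};
colorMatrixFilter.colorMatrix = swapRedAndBlue;
colorMatrixFilter.intensity = 1.0;   // fully replace the original color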
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColorPackingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageColorPackingFilter : GPUImageFilter
4 | {
5 | GLint texelWidthUniform, texelHeightUniform;
6 |
7 | CGFloat texelWidth, texelHeight;
8 | }
9 |
10 | @end
11 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColourFASTFeatureDetector.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | // This generates image-wide feature descriptors using the ColourFAST process, as developed and described in
4 | //
5 | // A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129.
6 | //
7 | // Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014.
8 | // http://aut.researchgateway.ac.nz/handle/10292/7991
9 |
10 | @class GPUImageColourFASTSamplingOperation;
11 | @class GPUImageBoxBlurFilter;
12 |
13 | @interface GPUImageColourFASTFeatureDetector : GPUImageFilterGroup
14 | {
15 | GPUImageBoxBlurFilter *blurFilter;
16 | GPUImageColourFASTSamplingOperation *colourFASTSamplingOperation;
17 | }
18 | // The blur radius of the underlying box blur. The default is 3.0.
19 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColourFASTFeatureDetector.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageColourFASTFeatureDetector.h"
2 | #import "GPUImageColourFASTSamplingOperation.h"
3 | #import "GPUImageBoxBlurFilter.h"
4 |
5 | @implementation GPUImageColourFASTFeatureDetector
6 |
7 | @synthesize blurRadiusInPixels;
8 |
9 | - (id)init;
10 | {
11 | if (!(self = [super init]))
12 | {
13 | return nil;
14 | }
15 |
16 |     // First pass: apply a variable-radius box blur
17 | blurFilter = [[GPUImageBoxBlurFilter alloc] init];
18 | [self addFilter:blurFilter];
19 |
20 | // Second pass: combine the blurred image with the original sharp one
21 | colourFASTSamplingOperation = [[GPUImageColourFASTSamplingOperation alloc] init];
22 | [self addFilter:colourFASTSamplingOperation];
23 |
24 | // Texture location 0 needs to be the sharp image for both the blur and the second stage processing
25 | [blurFilter addTarget:colourFASTSamplingOperation atTextureLocation:1];
26 |
27 | self.initialFilters = [NSArray arrayWithObjects:blurFilter, colourFASTSamplingOperation, nil];
28 | self.terminalFilter = colourFASTSamplingOperation;
29 |
30 | self.blurRadiusInPixels = 3.0;
31 |
32 | return self;
33 | }
34 |
35 | #pragma mark -
36 | #pragma mark Accessors
37 |
38 | - (void)setBlurRadiusInPixels:(CGFloat)newValue;
39 | {
40 | blurFilter.blurRadiusInPixels = newValue;
41 | }
42 |
43 | - (CGFloat)blurRadiusInPixels;
44 | {
45 | return blurFilter.blurRadiusInPixels;
46 | }
47 |
48 | @end
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageColourFASTSamplingOperation.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | // This is the feature extraction phase of the ColourFAST feature detector, as described in:
4 | //
5 | // A. Ensor and S. Hall. ColourFAST: GPU-based feature point detection and tracking on mobile devices. 28th International Conference of Image and Vision Computing, New Zealand, 2013, p. 124-129.
6 | //
7 | // Seth Hall, "GPU accelerated feature algorithms for mobile devices", PhD thesis, School of Computing and Mathematical Sciences, Auckland University of Technology 2014.
8 | // http://aut.researchgateway.ac.nz/handle/10292/7991
9 |
10 | @interface GPUImageColourFASTSamplingOperation : GPUImageTwoInputFilter
11 | {
12 | GLint texelWidthUniform, texelHeightUniform;
13 |
14 | CGFloat texelWidth, texelHeight;
15 | BOOL hasOverriddenImageSizeFactor;
16 | }
17 |
18 | // The texel width and height determine how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects.
19 | @property(readwrite, nonatomic) CGFloat texelWidth;
20 | @property(readwrite, nonatomic) CGFloat texelHeight;
21 |
22 | @end
23 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageContrastFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Adjusts the contrast of the image
4 | */
5 | @interface GPUImageContrastFilter : GPUImageFilter
6 | {
7 | GLint contrastUniform;
8 | }
9 |
10 | /** Contrast ranges from 0.0 to 4.0 (max contrast), with 1.0 as the normal level
11 | */
12 | @property(readwrite, nonatomic) CGFloat contrast;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageContrastFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageContrastFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 | uniform lowp float contrast;
10 |
11 | void main()
12 | {
13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
14 |
15 | gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w);
16 | }
17 | );
18 | #else
19 | NSString *const kGPUImageContrastFragmentShaderString = SHADER_STRING
20 | (
21 | varying vec2 textureCoordinate;
22 |
23 | uniform sampler2D inputImageTexture;
24 | uniform float contrast;
25 |
26 | void main()
27 | {
28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
29 |
30 | gl_FragColor = vec4(((textureColor.rgb - vec3(0.5)) * contrast + vec3(0.5)), textureColor.w);
31 | }
32 | );
33 | #endif
34 |
35 | @implementation GPUImageContrastFilter
36 |
37 | @synthesize contrast = _contrast;
38 |
39 | #pragma mark -
40 | #pragma mark Initialization
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageContrastFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | contrastUniform = [filterProgram uniformIndex:@"contrast"];
50 | self.contrast = 1.0;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setContrast:(CGFloat)newValue;
59 | {
60 | _contrast = newValue;
61 |
62 | [self setFloat:_contrast forUniform:contrastUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
67 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageCropFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageCropFilter : GPUImageFilter
4 | {
5 | GLfloat cropTextureCoordinates[8];
6 | }
7 |
8 | // The crop region is the rectangle within the image to crop. It is normalized to a coordinate space from 0.0 to 1.0, with 0.0, 0.0 being the upper left corner of the image
9 | @property(readwrite, nonatomic) CGRect cropRegion;
10 |
11 | // Initialization and teardown
12 | - (id)initWithCropRegion:(CGRect)newCropRegion;
13 |
14 | @end
15 |
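Usage sketch (not part of the file above): because the crop region is normalized, a rect of (0.25, 0.25, 0.5, 0.5) keeps the centered middle half of the image in each dimension.

GPUImageCropFilter *cropFilter = [[GPUImageCropFilter alloc] initWithCropRegion:CGRectMake(0.25, 0.25, 0.5, 0.5)];
cropFilter.cropRegion = CGRectMake(0.0, 0.0, 1.0, 0.5);   // later: keep only the top half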
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageCrosshairGenerator.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageCrosshairGenerator : GPUImageFilter
4 | {
5 | GLint crosshairWidthUniform, crosshairColorUniform;
6 | }
7 |
8 | // The width of the displayed crosshairs, in pixels. Currently this only works well for odd widths. The default is 5.
9 | @property(readwrite, nonatomic) CGFloat crosshairWidth;
10 |
11 | // The color of the crosshairs is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0).
12 | - (void)setCrosshairColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
13 |
14 | // Rendering
15 | - (void)renderCrosshairsFromArray:(GLfloat *)crosshairCoordinates count:(NSUInteger)numberOfCrosshairs frameTime:(CMTime)frameTime;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageCrosshatchFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageCrosshatchFilter : GPUImageFilter
4 | {
5 | GLint crossHatchSpacingUniform, lineWidthUniform;
6 | }
7 | // The fractional width of the image to use as the spacing for the crosshatch. The default is 0.03.
8 | @property(readwrite, nonatomic) CGFloat crossHatchSpacing;
9 |
10 | // A relative width for the crosshatch lines. The default is 0.003.
11 | @property(readwrite, nonatomic) CGFloat lineWidth;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDarkenBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageDarkenBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDarkenBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageDarkenBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | lowp vec4 base = texture2D(inputImageTexture, textureCoordinate);
15 | lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2);
16 |
17 | gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageDarkenBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 base = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2);
33 |
34 | gl_FragColor = vec4(min(overlayer.rgb * base.a, base.rgb * overlayer.a) + overlayer.rgb * (1.0 - base.a) + base.rgb * (1.0 - overlayer.a), 1.0);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageDarkenBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageDarkenBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDifferenceBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageDifferenceBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDifferenceBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageDifferenceBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 | gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a);
17 | }
18 | );
19 | #else
20 | NSString *const kGPUImageDifferenceBlendFragmentShaderString = SHADER_STRING
21 | (
22 | varying vec2 textureCoordinate;
23 | varying vec2 textureCoordinate2;
24 |
25 | uniform sampler2D inputImageTexture;
26 | uniform sampler2D inputImageTexture2;
27 |
28 | void main()
29 | {
30 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
31 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
32 | gl_FragColor = vec4(abs(textureColor2.rgb - textureColor.rgb), textureColor.a);
33 | }
34 | );
35 | #endif
36 |
37 | @implementation GPUImageDifferenceBlendFilter
38 |
39 | - (id)init;
40 | {
41 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageDifferenceBlendFragmentShaderString]))
42 | {
43 | return nil;
44 | }
45 |
46 | return self;
47 | }
48 |
49 | @end
50 |
51 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDilationFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | // For each pixel, this sets it to the maximum value of the red channel in a rectangular neighborhood extending out dilationRadius pixels from the center.
4 | // This extends out bright features, and is most commonly used with black-and-white thresholded images.
5 |
6 | extern NSString *const kGPUImageDilationRadiusOneVertexShaderString;
7 | extern NSString *const kGPUImageDilationRadiusTwoVertexShaderString;
8 | extern NSString *const kGPUImageDilationRadiusThreeVertexShaderString;
9 | extern NSString *const kGPUImageDilationRadiusFourVertexShaderString;
10 |
11 | @interface GPUImageDilationFilter : GPUImageTwoPassTextureSamplingFilter
12 |
13 | // Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
14 | - (id)initWithRadius:(NSUInteger)dilationRadius;
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDirectionalNonMaximumSuppressionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageDirectionalNonMaximumSuppressionFilter : GPUImageFilter
4 | {
5 | GLint texelWidthUniform, texelHeightUniform;
6 | GLint upperThresholdUniform, lowerThresholdUniform;
7 |
8 | BOOL hasOverriddenImageSizeFactor;
9 | }
10 |
11 | // The texel width and height determine how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects.
12 | @property(readwrite, nonatomic) CGFloat texelWidth;
13 | @property(readwrite, nonatomic) CGFloat texelHeight;
14 |
15 | // These thresholds set cutoffs for the intensities that definitely get registered (upper threshold) and those that definitely don't (lower threshold)
16 | @property(readwrite, nonatomic) CGFloat upperThreshold;
17 | @property(readwrite, nonatomic) CGFloat lowerThreshold;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDirectionalSobelEdgeDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageDirectionalSobelEdgeDetectionFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDissolveBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageDissolveBlendFilter : GPUImageTwoInputFilter
4 | {
5 | GLint mixUniform;
6 | }
7 |
8 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2), with 0.5 (half of either) as the normal level
9 | @property(readwrite, nonatomic) CGFloat mix;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageDivideBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageDivideBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageEmbossFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3ConvolutionFilter.h"
2 |
3 | @interface GPUImageEmbossFilter : GPUImage3x3ConvolutionFilter
4 |
5 | // The strength of the embossing, from 0.0 to 4.0, with 1.0 as the normal level
6 | @property(readwrite, nonatomic) CGFloat intensity;
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageEmbossFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageEmbossFilter.h"
2 |
3 | @implementation GPUImageEmbossFilter
4 |
5 | @synthesize intensity = _intensity;
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [super init]))
10 | {
11 | return nil;
12 | }
13 |
14 | self.intensity = 1.0;
15 |
16 | return self;
17 | }
18 |
19 | #pragma mark -
20 | #pragma mark Accessors
21 |
22 | - (void)setIntensity:(CGFloat)newValue;
23 | {
24 | // [(GPUImage3x3ConvolutionFilter *)filter setConvolutionMatrix:(GPUMatrix3x3){
25 | // {-2.0f, -1.0f, 0.0f},
26 | // {-1.0f, 1.0f, 1.0f},
27 | // { 0.0f, 1.0f, 2.0f}
28 | // }];
29 |
30 | _intensity = newValue;
31 |
32 | GPUMatrix3x3 newConvolutionMatrix;
33 | newConvolutionMatrix.one.one = _intensity * (-2.0);
34 | newConvolutionMatrix.one.two = -_intensity;
35 | newConvolutionMatrix.one.three = 0.0f;
36 |
37 | newConvolutionMatrix.two.one = -_intensity;
38 | newConvolutionMatrix.two.two = 1.0;
39 | newConvolutionMatrix.two.three = _intensity;
40 |
41 | newConvolutionMatrix.three.one = 0.0f;
42 | newConvolutionMatrix.three.two = _intensity;
43 | newConvolutionMatrix.three.three = _intensity * 2.0;
44 |
45 | self.convolutionKernel = newConvolutionMatrix;
46 | }
47 |
48 |
49 | @end
50 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageErosionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | // For each pixel, this sets it to the minimum value of the red channel in a rectangular neighborhood extending out erosionRadius pixels from the center.
4 | // This extends out dark features, and is most commonly used with black-and-white thresholded images.
5 |
6 | @interface GPUImageErosionFilter : GPUImageTwoPassTextureSamplingFilter
7 |
8 | // Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
9 | - (id)initWithRadius:(NSUInteger)erosionRadius;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageExclusionBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageExclusionBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageExposureFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageExposureFilter : GPUImageFilter
4 | {
5 | GLint exposureUniform;
6 | }
7 |
8 | // Exposure ranges from -10.0 to 10.0, with 0.0 as the normal level
9 | @property(readwrite, nonatomic) CGFloat exposure;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageExposureFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageExposureFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 | uniform highp float exposure;
10 |
11 | void main()
12 | {
13 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
14 |
15 | gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w);
16 | }
17 | );
18 | #else
19 | NSString *const kGPUImageExposureFragmentShaderString = SHADER_STRING
20 | (
21 | varying vec2 textureCoordinate;
22 |
23 | uniform sampler2D inputImageTexture;
24 | uniform float exposure;
25 |
26 | void main()
27 | {
28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
29 |
30 | gl_FragColor = vec4(textureColor.rgb * pow(2.0, exposure), textureColor.w);
31 | }
32 | );
33 | #endif
34 |
35 | @implementation GPUImageExposureFilter
36 |
37 | @synthesize exposure = _exposure;
38 |
39 | #pragma mark -
40 | #pragma mark Initialization and teardown
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageExposureFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | exposureUniform = [filterProgram uniformIndex:@"exposure"];
50 | self.exposure = 0.0;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setExposure:(CGFloat)newValue;
59 | {
60 | _exposure = newValue;
61 |
62 | [self setFloat:_exposure forUniform:exposureUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
67 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFASTCornerDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageGrayscaleFilter;
4 | @class GPUImage3x3TextureSamplingFilter;
5 | @class GPUImageNonMaximumSuppressionFilter;
6 |
7 | /*
8 | An implementation of the Features from Accelerated Segment Test (FAST) feature detector as described in the following publications:
9 |
10 | E. Rosten and T. Drummond. Fusing points and lines for high performance tracking. IEEE International Conference on Computer Vision, 2005.
11 | E. Rosten and T. Drummond. Machine learning for high-speed corner detection. European Conference on Computer Vision, 2006.
12 |
13 | For more about the FAST feature detector, see the resources here:
14 | http://www.edwardrosten.com/work/fast.html
15 | */
16 |
17 | typedef enum { kGPUImageFAST12Contiguous, kGPUImageFAST12ContiguousNonMaximumSuppressed} GPUImageFASTDetectorType;
18 |
19 | @interface GPUImageFASTCornerDetectionFilter : GPUImageFilterGroup
20 | {
21 | GPUImageGrayscaleFilter *luminanceReductionFilter;
22 | GPUImage3x3TextureSamplingFilter *featureDetectionFilter;
23 | GPUImageNonMaximumSuppressionFilter *nonMaximumSuppressionFilter;
24 | // Generate a lookup texture based on the bit patterns
25 |
26 | // Step 1: convert to monochrome if necessary
27 | // Step 2: do a lookup at each pixel based on the Bresenham circle, encode comparison in two color components
28 | // Step 3: do non-maximum suppression of close corner points
29 | }
30 |
31 | - (id)initWithFASTDetectorVariant:(GPUImageFASTDetectorType)detectorType;
32 |
33 | @end
34 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFalseColorFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageFalseColorFilter : GPUImageFilter
4 | {
5 | GLint firstColorUniform, secondColorUniform;
6 | }
7 |
8 | // The first and second colors specify what colors replace the dark and light areas of the image, respectively. The defaults are (0.0, 0.0, 0.5) and (1.0, 0.0, 0.0).
9 | @property(readwrite, nonatomic) GPUVector4 firstColor;
10 | @property(readwrite, nonatomic) GPUVector4 secondColor;
11 |
12 | - (void)setFirstColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
13 | - (void)setSecondColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFilterGroup.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageOutput.h"
2 | #import "GPUImageFilter.h"
3 |
4 | @interface GPUImageFilterGroup : GPUImageOutput
5 | {
6 | NSMutableArray *filters;
7 | BOOL isEndProcessing;
8 | }
9 |
10 | @property(readwrite, nonatomic, strong) GPUImageOutput *terminalFilter;
11 | @property(readwrite, nonatomic, strong) NSArray *initialFilters;
12 | @property(readwrite, nonatomic, strong) GPUImageOutput *inputFilterToIgnoreForUpdates;
13 |
14 | // Filter management
15 | - (void)addFilter:(GPUImageOutput *)newFilter;
16 | - (GPUImageOutput *)filterAtIndex:(NSUInteger)filterIndex;
17 | - (NSUInteger)filterCount;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFilterPipeline.h:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 | #import "GPUImageOutput.h"
3 |
4 | @interface GPUImageFilterPipeline : NSObject
5 | {
6 | NSString *stringValue;
7 | }
8 |
9 | @property (strong) NSMutableArray *filters;
10 |
11 | @property (strong) GPUImageOutput *input;
12 | @property (strong) id <GPUImageInput> output;
13 |
14 | - (id) initWithOrderedFilters:(NSArray*) filters input:(GPUImageOutput*)input output:(id <GPUImageInput>)output;
15 | - (id) initWithConfiguration:(NSDictionary*) configuration input:(GPUImageOutput*)input output:(id <GPUImageInput>)output;
16 | - (id) initWithConfigurationFile:(NSURL*) configuration input:(GPUImageOutput*)input output:(id <GPUImageInput>)output;
17 |
18 | - (void) addFilter:(GPUImageOutput *)filter;
19 | - (void) addFilter:(GPUImageOutput *)filter atIndex:(NSUInteger)insertIndex;
20 | - (void) replaceFilterAtIndex:(NSUInteger)index withFilter:(GPUImageOutput *)filter;
21 | - (void) replaceAllFilters:(NSArray *) newFilters;
22 | - (void) removeFilter:(GPUImageOutput *)filter;
23 | - (void) removeFilterAtIndex:(NSUInteger)index;
24 | - (void) removeAllFilters;
25 |
26 | - (UIImage *) currentFilteredFrame;
27 | - (UIImage *) currentFilteredFrameWithOrientation:(UIImageOrientation)imageOrientation;
28 | - (CGImageRef) newCGImageFromCurrentFilteredFrame;
29 |
30 | @end
31 |
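Usage sketch (not part of the file above): chaining two filters through a pipeline, assuming sourcePicture is a GPUImagePicture and imageView is a GPUImageView (a GPUImageInput-conforming output).

NSArray *orderedFilters = @[[[GPUImageContrastFilter alloc] init],
                            [[GPUImageBrightnessFilter alloc] init]];
GPUImageFilterPipeline *pipeline = [[GPUImageFilterPipeline alloc] initWithOrderedFilters:orderedFilters
                                                                                     input:sourcePicture
                                                                                    output:imageView];
[sourcePicture processImage];
UIImage *filteredImage = [pipeline currentFilteredFrame];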
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFourInputFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageThreeInputFilter.h"
2 |
3 | extern NSString *const kGPUImageFourInputTextureVertexShaderString;
4 |
5 | @interface GPUImageFourInputFilter : GPUImageThreeInputFilter
6 | {
7 | GPUImageFramebuffer *fourthInputFramebuffer;
8 |
9 | GLint filterFourthTextureCoordinateAttribute;
10 | GLint filterInputTextureUniform4;
11 | GPUImageRotationMode inputRotation4;
12 | GLuint filterSourceTexture4;
13 | CMTime fourthFrameTime;
14 |
15 | BOOL hasSetThirdTexture, hasReceivedFourthFrame, fourthFrameWasVideo;
16 | BOOL fourthFrameCheckDisabled;
17 | }
18 |
19 | - (void)disableFourthFrameCheck;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFramebuffer.h:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | #import <OpenGLES/EAGL.h>
5 | #import <OpenGLES/ES2/gl.h>
6 | #import <OpenGLES/ES2/glext.h>
7 | #else
8 | #import <OpenGL/OpenGL.h>
9 | #import <OpenGL/gl.h>
10 | #endif
11 |
12 | #import <QuartzCore/QuartzCore.h>
13 | #import <CoreMedia/CoreMedia.h>
14 |
15 |
16 | typedef struct GPUTextureOptions {
17 | GLenum minFilter;
18 | GLenum magFilter;
19 | GLenum wrapS;
20 | GLenum wrapT;
21 | GLenum internalFormat;
22 | GLenum format;
23 | GLenum type;
24 | } GPUTextureOptions;
25 |
26 | @interface GPUImageFramebuffer : NSObject
27 |
28 | @property(readonly) CGSize size;
29 | @property(readonly) GPUTextureOptions textureOptions;
30 | @property(readonly) GLuint texture;
31 | @property(readonly) BOOL missingFramebuffer;
32 |
33 | // Initialization and teardown
34 | - (id)initWithSize:(CGSize)framebufferSize;
35 | - (id)initWithSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)fboTextureOptions onlyTexture:(BOOL)onlyGenerateTexture;
36 | - (id)initWithSize:(CGSize)framebufferSize overriddenTexture:(GLuint)inputTexture;
37 |
38 | // Usage
39 | - (void)activateFramebuffer;
40 |
41 | // Reference counting
42 | - (void)lock;
43 | - (void)unlock;
44 | - (void)clearAllLocks;
45 | - (void)disableReferenceCounting;
46 | - (void)enableReferenceCounting;
47 |
48 | // Image capture
49 | - (CGImageRef)newCGImageFromFramebufferContents;
50 | - (void)restoreRenderTarget;
51 |
52 | // Raw data bytes
53 | - (void)lockForReading;
54 | - (void)unlockAfterReading;
55 | - (NSUInteger)bytesPerRow;
56 | - (GLubyte *)byteBuffer;
57 | - (CVPixelBufferRef)pixelBuffer;
58 |
59 | @end
60 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageFramebufferCache.h:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 | #import <QuartzCore/QuartzCore.h>
3 | #import "GPUImageFramebuffer.h"
4 |
5 | @interface GPUImageFramebufferCache : NSObject
6 |
7 | // Framebuffer management
8 | - (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize textureOptions:(GPUTextureOptions)textureOptions onlyTexture:(BOOL)onlyTexture;
9 | - (GPUImageFramebuffer *)fetchFramebufferForSize:(CGSize)framebufferSize onlyTexture:(BOOL)onlyTexture;
10 | - (void)returnFramebufferToCache:(GPUImageFramebuffer *)framebuffer;
11 | - (void)purgeAllUnassignedFramebuffers;
12 | - (void)addFramebufferToActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer;
13 | - (void)removeFramebufferFromActiveImageCaptureList:(GPUImageFramebuffer *)framebuffer;
14 |
15 | @end
16 |
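Usage sketch (not part of the file above): borrowing a framebuffer and returning it when finished; the shared cache accessor on GPUImageContext is assumed here, and the fetched framebuffer is treated as coming back locked.

GPUImageFramebufferCache *framebufferCache = [GPUImageContext sharedFramebufferCache];
GPUImageFramebuffer *framebuffer = [framebufferCache fetchFramebufferForSize:CGSizeMake(1280.0, 720.0) onlyTexture:NO];
[framebuffer activateFramebuffer];
// ... render into the framebuffer here ...
[framebuffer unlock];   // dropping the last lock lets the cache reclaim it for reuse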
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGammaFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageGammaFilter : GPUImageFilter
4 | {
5 | GLint gammaUniform;
6 | }
7 |
8 | // Gamma ranges from 0.0 to 3.0, with 1.0 as the normal level
9 | @property(readwrite, nonatomic) CGFloat gamma;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGammaFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageGammaFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 | uniform lowp float gamma;
10 |
11 | void main()
12 | {
13 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
14 |
15 | gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w);
16 | }
17 | );
18 | #else
19 | NSString *const kGPUImageGammaFragmentShaderString = SHADER_STRING
20 | (
21 | varying vec2 textureCoordinate;
22 |
23 | uniform sampler2D inputImageTexture;
24 | uniform float gamma;
25 |
26 | void main()
27 | {
28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
29 |
30 | gl_FragColor = vec4(pow(textureColor.rgb, vec3(gamma)), textureColor.w);
31 | }
32 | );
33 | #endif
34 |
35 | @implementation GPUImageGammaFilter
36 |
37 | @synthesize gamma = _gamma;
38 |
39 | #pragma mark -
40 | #pragma mark Initialization and teardown
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageGammaFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | gammaUniform = [filterProgram uniformIndex:@"gamma"];
50 | self.gamma = 1.0;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setGamma:(CGFloat)newValue;
59 | {
60 | _gamma = newValue;
61 |
62 | [self setFloat:_gamma forUniform:gammaUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
67 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGaussianBlurPositionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | /** A more generalized 9x9 Gaussian blur filter
4 | */
5 | @interface GPUImageGaussianBlurPositionFilter : GPUImageTwoPassTextureSamplingFilter
6 | {
7 | GLint blurCenterUniform, blurRadiusUniform, aspectRatioUniform;
8 | }
9 |
10 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0
11 | */
12 | @property (readwrite, nonatomic) CGFloat blurSize;
13 |
14 | /** Center for the blur, defaults to 0.5, 0.5
15 | */
16 | @property (readwrite, nonatomic) CGPoint blurCenter;
17 |
18 | /** Radius for the blur, defaults to 1.0
19 | */
20 | @property (readwrite, nonatomic) CGFloat blurRadius;
21 |
22 | @end
23 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGaussianSelectiveBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageGaussianBlurFilter;
4 |
5 | /** A Gaussian blur that preserves focus within a circular region
6 | */
7 | @interface GPUImageGaussianSelectiveBlurFilter : GPUImageFilterGroup
8 | {
9 | GPUImageGaussianBlurFilter *blurFilter;
10 | GPUImageFilter *selectiveFocusFilter;
11 | BOOL hasOverriddenAspectRatio;
12 | }
13 |
14 | /** The radius of the circular area being excluded from the blur
15 | */
16 | @property (readwrite, nonatomic) CGFloat excludeCircleRadius;
17 | /** The center of the circular area being excluded from the blur
18 | */
19 | @property (readwrite, nonatomic) CGPoint excludeCirclePoint;
20 | /** The size of the area between the blurred portion and the clear circle
21 | */
22 | @property (readwrite, nonatomic) CGFloat excludeBlurSize;
23 | /** A radius in pixels to use for the blur, with a default of 5.0. This adjusts the sigma variable in the Gaussian distribution function.
24 | */
25 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels;
26 | /** The aspect ratio of the image, used to adjust the circularity of the in-focus region. By default, this matches the image aspect ratio, but you can override this value.
27 | */
28 | @property (readwrite, nonatomic) CGFloat aspectRatio;
29 |
30 | @end
31 |
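Usage sketch (not part of the file above): blurring everything except a circular region around the image center.

GPUImageGaussianSelectiveBlurFilter *selectiveBlur = [[GPUImageGaussianSelectiveBlurFilter alloc] init];
selectiveBlur.blurRadiusInPixels = 8.0;
selectiveBlur.excludeCirclePoint = CGPointMake(0.5, 0.5);   // normalized center of the in-focus circle
selectiveBlur.excludeCircleRadius = 0.25;
selectiveBlur.excludeBlurSize = 0.1;    // width of the transition band between sharp and blurred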
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGlassSphereFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageSphereRefractionFilter.h"
2 |
3 | @interface GPUImageGlassSphereFilter : GPUImageSphereRefractionFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageGrayscaleFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | extern NSString *const kGPUImageLuminanceFragmentShaderString;
4 |
5 | /** Converts an image to grayscale (a slightly faster implementation of the saturation filter, without the ability to vary the color contribution)
6 | */
7 | @interface GPUImageGrayscaleFilter : GPUImageFilter
8 |
9 | @end
10 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHSBFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageColorMatrixFilter.h"
2 |
3 | @interface GPUImageHSBFilter : GPUImageColorMatrixFilter
4 |
5 | /** Reset the filter to have no transformations.
6 | */
7 | - (void)reset;
8 |
9 | /** Add a hue rotation to the filter.
10 | The hue rotation is in the range [-360, 360] with 0 being no-change.
11 | Note that this adjustment is additive, so use the reset method if you need to.
12 | */
13 | - (void)rotateHue:(float)h;
14 |
15 | /** Add a saturation adjustment to the filter.
16 | The saturation adjustment is in the range [0.0, 2.0] with 1.0 being no-change.
17 | Note that this adjustment is additive, so use the reset method if you need to.
18 | */
19 | - (void)adjustSaturation:(float)s;
20 |
21 | /** Add a brightness adjustment to the filter.
22 | The brightness adjustment is in the range [0.0, 2.0] with 1.0 being no-change.
23 | Note that this adjustment is additive, so use the reset method if you need to.
24 | */
25 | - (void)adjustBrightness:(float)b;
26 |
27 | @end
28 |
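Usage sketch (not part of the file above): the adjustments accumulate, so reset is called before applying a fresh combination.

GPUImageHSBFilter *hsbFilter = [[GPUImageHSBFilter alloc] init];
[hsbFilter reset];
[hsbFilter rotateHue:30.0];          // degrees, in [-360, 360]
[hsbFilter adjustSaturation:1.2];    // 1.0 leaves saturation unchanged
[hsbFilter adjustBrightness:0.9];    // 1.0 leaves brightness unchanged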
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHalftoneFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImagePixellateFilter.h"
2 |
3 | @interface GPUImageHalftoneFilter : GPUImagePixellateFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHardLightBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageHardLightBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHazeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /*
4 | * The haze filter can be used to add or remove haze (similar to a UV filter)
5 | *
6 | * @author Alaric Cole
7 | * @creationDate 03/10/12
8 | *
9 | */
10 |
11 | /** The haze filter can be used to add or remove haze
12 |
13 | This is similar to a UV filter
14 | */
15 | @interface GPUImageHazeFilter : GPUImageFilter
16 | {
17 | GLint distanceUniform;
18 | GLint slopeUniform;
19 | }
20 |
21 | /** Strength of the color applied. Default 0. Values between -.3 and .3 are best
22 | */
23 | @property(readwrite, nonatomic) CGFloat distance;
24 |
25 | /** Amount of color change. Default 0. Values between -.3 and .3 are best
26 | */
27 | @property(readwrite, nonatomic) CGFloat slope;
28 |
29 | @end
30 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHighPassFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 | #import "GPUImageLowPassFilter.h"
3 | #import "GPUImageDifferenceBlendFilter.h"
4 |
5 | @interface GPUImageHighPassFilter : GPUImageFilterGroup
6 | {
7 | GPUImageLowPassFilter *lowPassFilter;
8 | GPUImageDifferenceBlendFilter *differenceBlendFilter;
9 | }
10 |
11 | // This controls the degree by which the previous accumulated frames are blended and then subtracted from the current one. This ranges from 0.0 to 1.0, with a default of 0.5.
12 | @property(readwrite, nonatomic) CGFloat filterStrength;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHighPassFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageHighPassFilter.h"
2 |
3 | @implementation GPUImageHighPassFilter
4 |
5 | @synthesize filterStrength;
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [super init]))
10 | {
11 | return nil;
12 | }
13 |
14 | // Start with a low pass filter to define the component to be removed
15 | lowPassFilter = [[GPUImageLowPassFilter alloc] init];
16 | [self addFilter:lowPassFilter];
17 |
18 | // Take the difference of the current frame from the low pass filtered result to get the high pass
19 | differenceBlendFilter = [[GPUImageDifferenceBlendFilter alloc] init];
20 | [self addFilter:differenceBlendFilter];
21 |
22 | // Texture location 0 needs to be the original image for the difference blend
23 | [lowPassFilter addTarget:differenceBlendFilter atTextureLocation:1];
24 |
25 | self.initialFilters = [NSArray arrayWithObjects:lowPassFilter, differenceBlendFilter, nil];
26 | self.terminalFilter = differenceBlendFilter;
27 |
28 | self.filterStrength = 0.5;
29 |
30 | return self;
31 | }
32 |
33 | #pragma mark -
34 | #pragma mark Accessors
35 |
36 | - (void)setFilterStrength:(CGFloat)newValue;
37 | {
38 | lowPassFilter.filterStrength = newValue;
39 | }
40 |
41 | - (CGFloat)filterStrength;
42 | {
43 | return lowPassFilter.filterStrength;
44 | }
45 |
46 | @end
47 |
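
A usage sketch under the assumption of a live camera source: because the low-pass accumulation is subtracted from the current frame, static regions come out near black and only changes between frames survive, which is why this filter is often paired with motion detection.

GPUImageVideoCamera *videoCamera = [[GPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset640x480 cameraPosition:AVCaptureDevicePositionBack];

GPUImageHighPassFilter *highPassFilter = [[GPUImageHighPassFilter alloc] init];
highPassFilter.filterStrength = 0.5; // how strongly previous frames are accumulated before subtraction

[videoCamera addTarget:highPassFilter];
// [highPassFilter addTarget:someGPUImageView]; // attach a GPUImageView or further filters here
[videoCamera startCameraCapture];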
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHighlightShadowFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageHighlightShadowFilter : GPUImageFilter
4 | {
5 | GLint shadowsUniform, highlightsUniform;
6 | }
7 |
8 | /**
9 | * 0 - 1, increase to lighten shadows.
10 | * @default 0
11 | */
12 | @property(readwrite, nonatomic) CGFloat shadows;
13 |
14 | /**
15 | * 0 - 1, decrease to darken highlights.
16 | * @default 1
17 | */
18 | @property(readwrite, nonatomic) CGFloat highlights;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHighlightShadowTintFilter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageHighlightShadowTintFilter.h
3 | //
4 | //
5 | // Created by github.com/r3mus on 8/14/15.
6 | //
7 | //
8 |
9 | #import "GPUImageFilter.h"
10 |
11 | @interface GPUImageHighlightShadowTintFilter : GPUImageFilter
12 | {
13 | GLint shadowTintIntensityUniform, highlightTintIntensityUniform, shadowTintColorUniform, highlightTintColorUniform;
14 | }
15 |
16 | // The shadowTint and highlightTint colors specify which colors replace the dark and light areas of the image, respectively. The defaults are black for shadows and white for highlights.
17 | @property(readwrite, nonatomic) GLfloat shadowTintIntensity;
18 | @property(readwrite, nonatomic) GPUVector4 shadowTintColor;
19 | @property(readwrite, nonatomic) GLfloat highlightTintIntensity;
20 | @property(readwrite, nonatomic) GPUVector4 highlightTintColor;
21 |
22 | - (void)setShadowTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent;
23 | - (void)setHighlightTintColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent;
24 |
25 | @end
26 |
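
A short sketch of the tint setters declared above (the colors and intensities are illustrative, not defaults):

GPUImageHighlightShadowTintFilter *tintFilter = [[GPUImageHighlightShadowTintFilter alloc] init];
tintFilter.shadowTintIntensity = 0.5;
tintFilter.highlightTintIntensity = 0.5;
[tintFilter setShadowTintColorRed:0.0 green:0.0 blue:1.0 alpha:1.0];    // pull shadows toward blue
[tintFilter setHighlightTintColorRed:1.0 green:0.0 blue:0.0 alpha:1.0]; // pull highlights toward red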
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHistogramEqualizationFilter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageHistogramEqualizationFilter.h
3 | // FilterShowcase
4 | //
5 | // Created by Adam Marcus on 19/08/2014.
6 | // Copyright (c) 2014 Sunset Lake Software LLC. All rights reserved.
7 | //
8 |
9 | #import "GPUImageFilterGroup.h"
10 | #import "GPUImageHistogramFilter.h"
11 | #import "GPUImageRawDataOutput.h"
12 | #import "GPUImageRawDataInput.h"
13 | #import "GPUImageTwoInputFilter.h"
14 |
15 | @interface GPUImageHistogramEqualizationFilter : GPUImageFilterGroup
16 | {
17 | GPUImageHistogramFilter *histogramFilter;
18 | GPUImageRawDataOutput *rawDataOutputFilter;
19 | GPUImageRawDataInput *rawDataInputFilter;
20 | }
21 |
22 | @property(readwrite, nonatomic) NSUInteger downsamplingFactor;
23 |
24 | - (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType;
25 |
26 | @end
27 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHistogramFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | typedef enum { kGPUImageHistogramRed, kGPUImageHistogramGreen, kGPUImageHistogramBlue, kGPUImageHistogramRGB, kGPUImageHistogramLuminance} GPUImageHistogramType;
4 |
5 | @interface GPUImageHistogramFilter : GPUImageFilter
6 | {
7 | GPUImageHistogramType histogramType;
8 |
9 | GLubyte *vertexSamplingCoordinates;
10 |
11 | GLProgram *secondFilterProgram, *thirdFilterProgram;
12 | GLint secondFilterPositionAttribute, thirdFilterPositionAttribute;
13 | }
14 |
15 | // Rather than sampling every pixel, this dictates what fraction of the image is sampled. By default, this is 16 with a minimum of 1.
16 | @property(readwrite, nonatomic) NSUInteger downsamplingFactor;
17 |
18 | // Initialization and teardown
19 | - (id)initWithHistogramType:(GPUImageHistogramType)newHistogramType;
20 | - (void)initializeSecondaryAttributes;
21 |
22 | @end
23 |
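
One way this is commonly wired up, sketched with an assumed sourcePicture: the histogram filter writes its counts into a narrow output texture, and GPUImageHistogramGenerator (declared next) can render that as a graph overlay; downsamplingFactor trades accuracy for speed.

GPUImageHistogramFilter *histogramFilter = [[GPUImageHistogramFilter alloc] initWithHistogramType:kGPUImageHistogramRGB];
histogramFilter.downsamplingFactor = 16; // sample roughly 1 in 16 pixels

GPUImageHistogramGenerator *histogramGraph = [[GPUImageHistogramGenerator alloc] init];
[histogramGraph forceProcessingAtSize:CGSizeMake(256.0, 330.0)]; // size of the rendered graph

[sourcePicture addTarget:histogramFilter];
[histogramFilter addTarget:histogramGraph];
[sourcePicture processImage];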
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHistogramGenerator.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageHistogramGenerator : GPUImageFilter
4 | {
5 | GLint backgroundColorUniform;
6 | }
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHueBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageHueBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageHueFilter.h:
--------------------------------------------------------------------------------
1 |
2 | #import "GPUImageFilter.h"
3 |
4 | @interface GPUImageHueFilter : GPUImageFilter
5 | {
6 | GLint hueAdjustUniform;
7 |
8 | }
9 | @property (nonatomic, readwrite) CGFloat hue;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageJFAVoronoiFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageJFAVoronoiFilter : GPUImageFilter
4 | {
5 | GLuint secondFilterOutputTexture;
6 | GLuint secondFilterFramebuffer;
7 |
8 |
9 | GLint sampleStepUniform;
10 | GLint sizeUniform;
11 | NSUInteger numPasses;
12 |
13 | }
14 |
15 | @property (nonatomic, readwrite) CGSize sizeInPixels;
16 |
17 | @end
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageKuwaharaFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Kuwahara image abstraction, drawn from the work of Kyprianidis, et al. in their publication "Anisotropic Kuwahara Filtering on the GPU" within the GPU Pro collection. This produces an oil-painting-like image, but it is extremely computationally expensive, so it can take seconds to render a frame on an iPad 2. This might be best used for still images.
4 | */
5 | @interface GPUImageKuwaharaFilter : GPUImageFilter
6 | {
7 | GLint radiusUniform;
8 | }
9 |
10 | /// The radius to sample from when creating the brush-stroke effect, with a default of 3. The larger the radius, the slower the filter.
11 | @property(readwrite, nonatomic) NSUInteger radius;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageKuwaharaRadius3Filter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageKuwaharaRadius3Filter.h
3 |
4 | #import "GPUImageFilter.h"
5 |
6 | @interface GPUImageKuwaharaRadius3Filter : GPUImageFilter
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLanczosResamplingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | @interface GPUImageLanczosResamplingFilter : GPUImageTwoPassTextureSamplingFilter
4 |
5 | @property(readwrite, nonatomic) CGSize originalImageSize;
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLaplacianFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3ConvolutionFilter.h"
2 |
3 | @interface GPUImageLaplacianFilter : GPUImage3x3ConvolutionFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLightenBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | /// Blends two images by taking the maximum value of each color component between the images
4 | @interface GPUImageLightenBlendFilter : GPUImageTwoInputFilter
5 | {
6 | }
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLightenBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageLightenBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 |
17 | gl_FragColor = max(textureColor, textureColor2);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageLightenBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
33 |
34 | gl_FragColor = max(textureColor, textureColor2);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageLightenBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageLightenBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLineGenerator.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageLineGenerator : GPUImageFilter
4 | {
5 | GLint lineWidthUniform, lineColorUniform;
6 | GLfloat *lineCoordinates;
7 | }
8 |
9 | // The width of the displayed lines, in pixels. The default is 1.
10 | @property(readwrite, nonatomic) CGFloat lineWidth;
11 |
12 | // The color of the lines is specified using individual red, green, and blue components (normalized to 1.0). The default is green: (0.0, 1.0, 0.0).
13 | - (void)setLineColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
14 |
15 | // Rendering
16 | - (void)renderLinesFromArray:(GLfloat *)lineSlopeAndIntercepts count:(NSUInteger)numberOfLines frameTime:(CMTime)frameTime;
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLinearBurnBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageLinearBurnBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLinearBurnBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageLinearBurnBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 |
17 | gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), vec3(0.0), vec3(1.0)), textureColor.a);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageLinearBurnBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
33 |
34 | gl_FragColor = vec4(clamp(textureColor.rgb + textureColor2.rgb - vec3(1.0), vec3(0.0), vec3(1.0)), textureColor.a);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageLinearBurnBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageLinearBurnBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLocalBinaryPatternFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageLocalBinaryPatternFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLookupFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageLookupFilter : GPUImageTwoInputFilter
4 | {
5 | GLint intensityUniform;
6 | }
7 |
8 | // How To Use:
9 | // 1) Use your favourite photo editing application to apply a filter to lookup.png from GPUImage/framework/Resources.
10 | // For this to work properly each pixel color must not depend on other pixels (e.g. blur will not work).
11 | //      If you need a more complex filter you can create as many lookup tables as required.
12 | // E.g. color_balance_lookup_1.png -> GPUImageGaussianBlurFilter -> color_balance_lookup_2.png
13 | // 2) Use your new lookup.png file as the second input for GPUImageLookupFilter.
14 |
15 | // See GPUImageAmatorkaFilter, GPUImageMissEtikateFilter, and GPUImageSoftEleganceFilter for examples.
16 |
17 | // Additional Info:
18 | // The lookup texture is organised as an 8x8 grid of 64x64-pixel quads representing all possible RGB colors:
19 | //for (int by = 0; by < 8; by++) {
20 | // for (int bx = 0; bx < 8; bx++) {
21 | // for (int g = 0; g < 64; g++) {
22 | // for (int r = 0; r < 64; r++) {
23 | // image.setPixel(r + bx * 64, g + by * 64, qRgb((int)(r * 255.0 / 63.0 + 0.5),
24 | // (int)(g * 255.0 / 63.0 + 0.5),
25 | // (int)((bx + by * 8.0) * 255.0 / 63.0 + 0.5)));
26 | // }
27 | // }
28 | // }
29 | //}
30 |
31 | // Opacity/intensity of the lookup filter, ranging from 0.0 to 1.0, with 1.0 as the normal setting
32 | @property(readwrite, nonatomic) CGFloat intensity;
33 |
34 | @end
35 |
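
Following the steps above, a minimal sketch (the edited lookup image name and sourcePicture are assumptions) feeds the modified lookup table in as the second input:

UIImage *lookupImage = [UIImage imageNamed:@"lookup_custom.png"]; // your edited copy of lookup.png
GPUImagePicture *lookupImageSource = [[GPUImagePicture alloc] initWithImage:lookupImage];

GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init];
lookupFilter.intensity = 1.0; // full effect; lower values blend back toward the original

[sourcePicture addTarget:lookupFilter];                         // input 1: the image to color-grade
[lookupImageSource addTarget:lookupFilter atTextureLocation:1]; // input 2: the lookup table
[lookupImageSource processImage];
[sourcePicture processImage];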
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLowPassFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 | #import "GPUImageBuffer.h"
3 | #import "GPUImageDissolveBlendFilter.h"
4 |
5 | @interface GPUImageLowPassFilter : GPUImageFilterGroup
6 | {
7 | GPUImageBuffer *bufferFilter;
8 | GPUImageDissolveBlendFilter *dissolveBlendFilter;
9 | }
10 |
11 | // This controls the degree by which the previous accumulated frames are blended with the current one. This ranges from 0.0 to 1.0, with a default of 0.5.
12 | @property(readwrite, nonatomic) CGFloat filterStrength;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLuminanceRangeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageLuminanceRangeFilter : GPUImageFilter
4 | {
5 | GLint rangeReductionUniform;
6 | }
7 |
8 | /** The degree by which to reduce the luminance range, from 0.0 to 1.0. Default is 0.6.
9 | */
10 | @property(readwrite, nonatomic) CGFloat rangeReductionFactor;
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLuminanceThresholdFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Pixels with a luminance above the threshold will appear white, and those below will be black
4 | */
5 | @interface GPUImageLuminanceThresholdFilter : GPUImageFilter
6 | {
7 | GLint thresholdUniform;
8 | }
9 |
10 | /** Anything above this luminance will be white, and anything below black. Ranges from 0.0 to 1.0, with 0.5 as the default
11 | */
12 | @property(readwrite, nonatomic) CGFloat threshold;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLuminosity.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageAverageColor.h"
2 |
3 | @interface GPUImageLuminosity : GPUImageAverageColor
4 | {
5 | GLProgram *secondFilterProgram;
6 | GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute;
7 | GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2;
8 | GLint secondFilterTexelWidthUniform, secondFilterTexelHeightUniform;
9 | }
10 |
11 | // This block is called on the completion of color averaging for a frame
12 | @property(nonatomic, copy) void(^luminosityProcessingFinishedBlock)(CGFloat luminosity, CMTime frameTime);
13 |
14 | - (void)extractLuminosityAtFrameTime:(CMTime)frameTime;
15 | - (void)initializeSecondaryAttributes;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageLuminosityBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageLuminosityBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMaskFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageMaskFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMedianFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageMedianFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMissEtikateFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImagePicture;
4 |
5 | /** A photo filter based on Photoshop action by Miss Etikate:
6 | http://miss-etikate.deviantart.com/art/Photoshop-Action-15-120151961
7 | */
8 |
9 | // Note: If you want to use this effect you have to add lookup_miss_etikate.png
10 | // from the Resources folder to your application bundle.
11 |
12 | @interface GPUImageMissEtikateFilter : GPUImageFilterGroup
13 | {
14 | GPUImagePicture *lookupImageSource;
15 | }
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMissEtikateFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageMissEtikateFilter.h"
2 | #import "GPUImagePicture.h"
3 | #import "GPUImageLookupFilter.h"
4 |
5 | @implementation GPUImageMissEtikateFilter
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [super init]))
10 | {
11 | return nil;
12 | }
13 |
14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
15 | UIImage *image = [UIImage imageNamed:@"lookup_miss_etikate.png"];
16 | #else
17 | NSImage *image = [NSImage imageNamed:@"lookup_miss_etikate.png"];
18 | #endif
19 |
20 | NSAssert(image, @"To use GPUImageMissEtikateFilter you need to add lookup_miss_etikate.png from GPUImage/framework/Resources to your application bundle.");
21 |
22 | lookupImageSource = [[GPUImagePicture alloc] initWithImage:image];
23 | GPUImageLookupFilter *lookupFilter = [[GPUImageLookupFilter alloc] init];
24 | [self addFilter:lookupFilter];
25 |
26 | [lookupImageSource addTarget:lookupFilter atTextureLocation:1];
27 | [lookupImageSource processImage];
28 |
29 | self.initialFilters = [NSArray arrayWithObjects:lookupFilter, nil];
30 | self.terminalFilter = lookupFilter;
31 |
32 | return self;
33 | }
34 |
35 | #pragma mark -
36 | #pragma mark Accessors
37 |
38 | @end
39 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMonochromeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageMonochromeFilter : GPUImageFilter
4 | {
5 | GLint intensityUniform, filterColorUniform;
6 | }
7 |
8 | @property(readwrite, nonatomic) CGFloat intensity;
9 | @property(readwrite, nonatomic) GPUVector4 color;
10 |
11 | - (void)setColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMosaicFilter.h:
--------------------------------------------------------------------------------
1 |
2 | // This needs a little more work; it's rotating the input tileset and there are some artifacts (I think from GL_LINEAR interpolation), but it's working.
3 |
4 | #import "GPUImageTwoInputFilter.h"
5 | #import "GPUImagePicture.h"
6 |
7 | @interface GPUImageMosaicFilter : GPUImageTwoInputFilter {
8 | GLint inputTileSizeUniform, numTilesUniform, displayTileSizeUniform, colorOnUniform;
9 | GPUImagePicture *pic;
10 | }
11 |
12 | // This filter takes an input tileset; the tiles must ascend in luminance.
13 | // It looks at the input image and replaces each display tile with an input tile
14 | // according to the luminance of that tile. The idea was to replicate the ASCII
15 | // video filters seen in other apps, but the tileset can be anything.
16 | @property(readwrite, nonatomic) CGSize inputTileSize;
17 | @property(readwrite, nonatomic) float numTiles;
18 | @property(readwrite, nonatomic) CGSize displayTileSize;
19 | @property(readwrite, nonatomic) BOOL colorOn;
20 | @property(readwrite, nonatomic, copy) NSString *tileSet;
21 |
22 | @end
23 |
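
A hedged configuration sketch, assuming a tileset image (ordered by ascending luminance) named tiles.png has been added to the application bundle:

GPUImageMosaicFilter *mosaicFilter = [[GPUImageMosaicFilter alloc] init];
[mosaicFilter setTileSet:@"tiles.png"];                  // tiles must ascend in luminance
[mosaicFilter setColorOn:NO];                            // use the tile colors as-is
mosaicFilter.displayTileSize = CGSizeMake(0.025, 0.025); // each output tile is 1/40 of the image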
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMotionBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageMotionBlurFilter : GPUImageFilter
4 |
5 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0
6 | */
7 | @property (readwrite, nonatomic) CGFloat blurSize;
8 |
9 | /** The angular direction of the blur, in degrees. 0 degrees by default
10 | */
11 | @property (readwrite, nonatomic) CGFloat blurAngle;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMotionDetector.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 | #import "GPUImageLowPassFilter.h"
3 | #import "GPUImageAverageColor.h"
4 |
5 | @interface GPUImageMotionDetector : GPUImageFilterGroup
6 | {
7 | GPUImageLowPassFilter *lowPassFilter;
8 | GPUImageTwoInputFilter *frameComparisonFilter;
9 | GPUImageAverageColor *averageColor;
10 | }
11 |
12 | // This controls the low pass filter strength used to compare the current frame with previous ones to detect motion. This ranges from 0.0 to 1.0, with a default of 0.5.
13 | @property(readwrite, nonatomic) CGFloat lowPassFilterStrength;
14 |
15 | // For every frame, this will feed back the calculated centroid of the motion, as well as a relative intensity.
16 | @property(nonatomic, copy) void(^motionDetectionBlock)(CGPoint motionCentroid, CGFloat motionIntensity, CMTime frameTime);
17 |
18 | @end
19 |
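
A usage sketch (videoCamera is an assumed, already-configured GPUImageVideoCamera): for every frame, the block receives the centroid of detected motion in normalized coordinates plus a relative intensity.

GPUImageMotionDetector *motionDetector = [[GPUImageMotionDetector alloc] init];
motionDetector.lowPassFilterStrength = 0.5;
[motionDetector setMotionDetectionBlock:^(CGPoint motionCentroid, CGFloat motionIntensity, CMTime frameTime) {
    if (motionIntensity > 0.01)
    {
        NSLog(@"Motion at (%f, %f), intensity %f", motionCentroid.x, motionCentroid.y, motionIntensity);
    }
}];

[videoCamera addTarget:motionDetector];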
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMovieComposition.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageMovieComposition.h
3 | // Givit
4 | //
5 | // Created by Sean Meiners on 2013/01/25.
6 | //
7 | //
8 |
9 | #import "GPUImageMovie.h"
10 |
11 | @interface GPUImageMovieComposition : GPUImageMovie
12 |
13 | @property (readwrite, retain) AVComposition *compositon;
14 | @property (readwrite, retain) AVVideoComposition *videoComposition;
15 | @property (readwrite, retain) AVAudioMix *audioMix;
16 |
17 | - (id)initWithComposition:(AVComposition*)compositon
18 | andVideoComposition:(AVVideoComposition*)videoComposition
19 | andAudioMix:(AVAudioMix*)audioMix;
20 |
21 | @end
22 |
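
A sketch of driving the pipeline from an AVComposition (the composition, videoComposition, audioMix, and downstream filter are assumed to exist):

GPUImageMovieComposition *movieSource = [[GPUImageMovieComposition alloc] initWithComposition:composition
                                                                           andVideoComposition:videoComposition
                                                                                   andAudioMix:audioMix];
[movieSource addTarget:someFilter];
[movieSource startProcessing];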
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMultiplyBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageMultiplyBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageMultiplyBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageMultiplyBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | lowp vec4 base = texture2D(inputImageTexture, textureCoordinate);
15 | lowp vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2);
16 |
17 | gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageMultiplyBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 base = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 overlayer = texture2D(inputImageTexture2, textureCoordinate2);
33 |
34 | gl_FragColor = overlayer * base + overlayer * (1.0 - base.a) + base * (1.0 - overlayer.a);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageMultiplyBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageMultiplyBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageNobleCornerDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageHarrisCornerDetectionFilter.h"
2 |
3 | /** Noble corner detector
4 |
5 | This is the Noble variant on the Harris detector, from
6 | Alison Noble, "Descriptions of Image Surfaces", PhD thesis, Department of Engineering Science, Oxford University 1989, p45.
7 | */
8 |
9 |
10 | @interface GPUImageNobleCornerDetectionFilter : GPUImageHarrisCornerDetectionFilter
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageNonMaximumSuppressionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageNormalBlendFilter.h:
--------------------------------------------------------------------------------
1 | // Created by Jorge Garcia on 9/5/12.
2 | //
3 |
4 | #import "GPUImageTwoInputFilter.h"
5 |
6 | @interface GPUImageNormalBlendFilter : GPUImageTwoInputFilter
7 |
8 | @end
9 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageOpacityFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageOpacityFilter : GPUImageFilter
4 | {
5 | GLint opacityUniform;
6 | }
7 |
8 | // Opacity ranges from 0.0 to 1.0, with 1.0 as the normal setting
9 | @property(readwrite, nonatomic) CGFloat opacity;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageOpacityFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageOpacityFilter.h"
2 |
3 | @implementation GPUImageOpacityFilter
4 |
5 | @synthesize opacity = _opacity;
6 |
7 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
8 | NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING
9 | (
10 | varying highp vec2 textureCoordinate;
11 |
12 | uniform sampler2D inputImageTexture;
13 | uniform lowp float opacity;
14 |
15 | void main()
16 | {
17 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
18 |
19 | gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity);
20 | }
21 | );
22 | #else
23 | NSString *const kGPUImageOpacityFragmentShaderString = SHADER_STRING
24 | (
25 | varying vec2 textureCoordinate;
26 |
27 | uniform sampler2D inputImageTexture;
28 | uniform float opacity;
29 |
30 | void main()
31 | {
32 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
33 |
34 | gl_FragColor = vec4(textureColor.rgb, textureColor.a * opacity);
35 | }
36 | );
37 | #endif
38 |
39 | #pragma mark -
40 | #pragma mark Initialization and teardown
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageOpacityFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | opacityUniform = [filterProgram uniformIndex:@"opacity"];
50 | self.opacity = 1.0;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setOpacity:(CGFloat)newValue;
59 | {
60 | _opacity = newValue;
61 |
62 | [self setFloat:_opacity forUniform:opacityUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageOpeningFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageErosionFilter;
4 | @class GPUImageDilationFilter;
5 |
6 | // A filter that first performs an erosion on the red channel of an image, followed by a dilation of the same radius.
7 | // This helps to filter out smaller bright elements.
8 |
9 | @interface GPUImageOpeningFilter : GPUImageFilterGroup
10 | {
11 | GPUImageErosionFilter *erosionFilter;
12 | GPUImageDilationFilter *dilationFilter;
13 | }
14 |
15 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing;
16 |
17 | - (id)initWithRadius:(NSUInteger)radius;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageOpeningFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageOpeningFilter.h"
2 | #import "GPUImageErosionFilter.h"
3 | #import "GPUImageDilationFilter.h"
4 |
5 | @implementation GPUImageOpeningFilter
6 |
7 | @synthesize verticalTexelSpacing = _verticalTexelSpacing;
8 | @synthesize horizontalTexelSpacing = _horizontalTexelSpacing;
9 |
10 | - (id)init;
11 | {
12 | if (!(self = [self initWithRadius:1]))
13 | {
14 | return nil;
15 | }
16 |
17 | return self;
18 | }
19 |
20 | - (id)initWithRadius:(NSUInteger)radius;
21 | {
22 | if (!(self = [super init]))
23 | {
24 | return nil;
25 | }
26 |
27 | // First pass: erosion
28 | erosionFilter = [[GPUImageErosionFilter alloc] initWithRadius:radius];
29 | [self addFilter:erosionFilter];
30 |
31 | // Second pass: dilation
32 | dilationFilter = [[GPUImageDilationFilter alloc] initWithRadius:radius];
33 | [self addFilter:dilationFilter];
34 |
35 | [erosionFilter addTarget:dilationFilter];
36 |
37 | self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil];
38 | self.terminalFilter = dilationFilter;
39 |
40 | return self;
41 | }
42 |
43 | - (void)setVerticalTexelSpacing:(CGFloat)newValue;
44 | {
45 | _verticalTexelSpacing = newValue;
46 | erosionFilter.verticalTexelSpacing = newValue;
47 | dilationFilter.verticalTexelSpacing = newValue;
48 | }
49 |
50 | - (void)setHorizontalTexelSpacing:(CGFloat)newValue;
51 | {
52 | _horizontalTexelSpacing = newValue;
53 | erosionFilter.horizontalTexelSpacing = newValue;
54 | dilationFilter.horizontalTexelSpacing = newValue;
55 | }
56 |
57 | @end
58 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageOverlayBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageOverlayBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageParallelCoordinateLineTransformFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | // This is an accumulator that uses a Hough transform in parallel coordinate space to identify probable lines in a scene.
4 | //
5 | // It is entirely based on the work of the Graph@FIT research group at the Brno University of Technology and their publications:
6 | // M. Dubská, J. Havel, and A. Herout. Real-Time Detection of Lines using Parallel Coordinates and OpenGL. Proceedings of SCCG 2011, Bratislava, SK, p. 7.
7 | // M. Dubská, J. Havel, and A. Herout. PClines — Line detection using parallel coordinates. 2011 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), p. 1489- 1494.
8 |
9 | @interface GPUImageParallelCoordinateLineTransformFilter : GPUImageFilter
10 | {
11 | GLubyte *rawImagePixels;
12 | GLfloat *lineCoordinates;
13 | unsigned int maxLinePairsToRender, linePairsToRender;
14 | }
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePerlinNoiseFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImagePerlinNoiseFilter : GPUImageFilter
4 | {
5 | GLint scaleUniform, colorStartUniform, colorFinishUniform;
6 | }
7 |
8 | @property (readwrite, nonatomic) GPUVector4 colorStart;
9 | @property (readwrite, nonatomic) GPUVector4 colorFinish;
10 |
11 | @property (readwrite, nonatomic) float scale;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePinchDistortionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Creates a pinch distortion of the image
4 | */
5 | @interface GPUImagePinchDistortionFilter : GPUImageFilter
6 | {
7 | GLint aspectRatioUniform, radiusUniform, centerUniform, scaleUniform;
8 | }
9 |
10 | /** The center about which to apply the distortion, with a default of (0.5, 0.5)
11 | */
12 | @property(readwrite, nonatomic) CGPoint center;
13 | /** The radius of the distortion, ranging from 0.0 to 2.0, with a default of 1.0
14 | */
15 | @property(readwrite, nonatomic) CGFloat radius;
16 | /** The amount of distortion to apply, from -2.0 to 2.0, with a default of 0.5
17 | */
18 | @property(readwrite, nonatomic) CGFloat scale;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePixellateFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImagePixellateFilter : GPUImageFilter
4 | {
5 | GLint fractionalWidthOfAPixelUniform, aspectRatioUniform;
6 | }
7 |
8 | // The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored.
9 | @property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel;
10 |
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePixellatePositionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImagePixellatePositionFilter : GPUImageFilter
4 | {
5 | GLint fractionalWidthOfAPixelUniform, aspectRatioUniform, centerUniform, radiusUniform;
6 | }
7 |
8 | // The fractional width of the image to use as a size for the pixels in the resulting image. Values below one pixel width in the source image are ignored.
9 | @property(readwrite, nonatomic) CGFloat fractionalWidthOfAPixel;
10 |
11 | // The center point at which to start pixellation, in texture coordinates; default (0.5, 0.5)
12 | @property(readwrite, nonatomic) CGPoint center;
13 |
14 | // The radius (0.0 - 1.0) within which to pixellate; default 1.0
15 | @property(readwrite, nonatomic) CGFloat radius;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePoissonBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputCrossTextureSamplingFilter.h"
2 | #import "GPUImageFilterGroup.h"
3 |
4 | @interface GPUImagePoissonBlendFilter : GPUImageTwoInputCrossTextureSamplingFilter
5 | {
6 | GLint mixUniform;
7 |
8 | GPUImageFramebuffer *secondOutputFramebuffer;
9 | }
10 |
11 | // Mix ranges from 0.0 (only image 1) to 1.0 (only image 2 gradients), with 1.0 as the normal level
12 | @property(readwrite, nonatomic) CGFloat mix;
13 |
14 | // The number of times to propagate the gradients.
15 | // Crank this up to 100 or even 1000 if you want to get anywhere near convergence. Yes, this will be slow.
16 | @property(readwrite, nonatomic) NSUInteger numIterations;
17 |
18 | @end
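
A sketch of blending two still images (the source names are assumptions); as the comment above warns, numIterations must be large before the result approaches convergence, at a real cost in speed.

GPUImagePoissonBlendFilter *poissonBlend = [[GPUImagePoissonBlendFilter alloc] init];
poissonBlend.mix = 1.0;           // take the gradients entirely from the second image
poissonBlend.numIterations = 100; // slow, but much closer to convergence

[baseImageSource addTarget:poissonBlend];    // input 1
[overlayImageSource addTarget:poissonBlend]; // input 2
[baseImageSource processImage];
[overlayImageSource processImage];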
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePolarPixellateFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImagePolarPixellateFilter : GPUImageFilter {
4 | GLint centerUniform, pixelSizeUniform;
5 | }
6 |
7 | // The center about which to apply the distortion, with a default of (0.5, 0.5)
8 | @property(readwrite, nonatomic) CGPoint center;
9 | // The amount of distortion to apply, from (-2.0, -2.0) to (2.0, 2.0), with a default of (0.05, 0.05)
10 | @property(readwrite, nonatomic) CGSize pixelSize;
11 |
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePolkaDotFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImagePixellateFilter.h"
2 |
3 | @interface GPUImagePolkaDotFilter : GPUImagePixellateFilter
4 | {
5 | GLint dotScalingUniform;
6 | }
7 |
8 | @property(readwrite, nonatomic) CGFloat dotScaling;
9 |
10 | @end
11 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePosterizeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** This reduces the color dynamic range into the number of steps specified, leading to a cartoon-like simple shading of the image.
4 | */
5 | @interface GPUImagePosterizeFilter : GPUImageFilter
6 | {
7 | GLint colorLevelsUniform;
8 | }
9 |
10 | /** The number of color levels to reduce the image space to. This ranges from 1 to 256, with a default of 10.
11 | */
12 | @property(readwrite, nonatomic) NSUInteger colorLevels;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePosterizeFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImagePosterizeFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 |
8 | uniform sampler2D inputImageTexture;
9 | uniform highp float colorLevels;
10 |
11 | void main()
12 | {
13 | highp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
14 |
15 | gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels;
16 | }
17 | );
18 | #else
19 | NSString *const kGPUImagePosterizeFragmentShaderString = SHADER_STRING
20 | (
21 | varying vec2 textureCoordinate;
22 |
23 | uniform sampler2D inputImageTexture;
24 | uniform float colorLevels;
25 |
26 | void main()
27 | {
28 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
29 |
30 | gl_FragColor = floor((textureColor * colorLevels) + vec4(0.5)) / colorLevels;
31 | }
32 | );
33 | #endif
34 |
35 | @implementation GPUImagePosterizeFilter
36 |
37 | @synthesize colorLevels = _colorLevels;
38 |
39 | #pragma mark -
40 | #pragma mark Initialization
41 |
42 | - (id)init;
43 | {
44 | if (!(self = [super initWithFragmentShaderFromString:kGPUImagePosterizeFragmentShaderString]))
45 | {
46 | return nil;
47 | }
48 |
49 | colorLevelsUniform = [filterProgram uniformIndex:@"colorLevels"];
50 | self.colorLevels = 10;
51 |
52 | return self;
53 | }
54 |
55 | #pragma mark -
56 | #pragma mark Accessors
57 |
58 | - (void)setColorLevels:(NSUInteger)newValue;
59 | {
60 | _colorLevels = newValue;
61 |
62 | [self setFloat:_colorLevels forUniform:colorLevelsUniform program:filterProgram];
63 | }
64 |
65 | @end
66 |
67 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImagePrewittEdgeDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageSobelEdgeDetectionFilter.h"
2 |
3 | @interface GPUImagePrewittEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBClosingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageRGBErosionFilter;
4 | @class GPUImageRGBDilationFilter;
5 |
6 | // A filter that first performs a dilation on each color channel of an image, followed by an erosion of the same radius.
7 | // This helps to filter out smaller dark elements.
8 |
9 | @interface GPUImageRGBClosingFilter : GPUImageFilterGroup
10 | {
11 | GPUImageRGBErosionFilter *erosionFilter;
12 | GPUImageRGBDilationFilter *dilationFilter;
13 | }
14 |
15 | - (id)initWithRadius:(NSUInteger)radius;
16 |
17 |
18 | @end
19 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBClosingFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageRGBClosingFilter.h"
2 | #import "GPUImageRGBErosionFilter.h"
3 | #import "GPUImageRGBDilationFilter.h"
4 |
5 | @implementation GPUImageRGBClosingFilter
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [self initWithRadius:1]))
10 | {
11 | return nil;
12 | }
13 |
14 | return self;
15 | }
16 |
17 | - (id)initWithRadius:(NSUInteger)radius;
18 | {
19 | if (!(self = [super init]))
20 | {
21 | return nil;
22 | }
23 |
24 | // First pass: dilation
25 | dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius];
26 | [self addFilter:dilationFilter];
27 |
28 | // Second pass: erosion
29 | erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius];
30 | [self addFilter:erosionFilter];
31 |
32 | [dilationFilter addTarget:erosionFilter];
33 |
34 | self.initialFilters = [NSArray arrayWithObjects:dilationFilter, nil];
35 | self.terminalFilter = erosionFilter;
36 |
37 | return self;
38 | }
39 |
40 |
41 | @end
42 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBDilationFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | // For each pixel, this sets it to the maximum value of each color channel in a rectangular neighborhood extending out dilationRadius pixels from the center.
4 | // This extends out brighter colors, and can be used for abstraction of color images.
5 |
6 | @interface GPUImageRGBDilationFilter : GPUImageTwoPassTextureSamplingFilter
7 |
8 | // Acceptable values for dilationRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
9 | - (id)initWithRadius:(NSUInteger)dilationRadius;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBErosionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassTextureSamplingFilter.h"
2 |
3 | // For each pixel, this sets it to the minimum value of each color channel in a rectangular neighborhood extending out erosionRadius pixels from the center.
4 | // This extends out dark features, and can be used for abstraction of color images.
5 |
6 | @interface GPUImageRGBErosionFilter : GPUImageTwoPassTextureSamplingFilter
7 |
8 | // Acceptable values for erosionRadius, which sets the distance in pixels to sample out from the center, are 1, 2, 3, and 4.
9 | - (id)initWithRadius:(NSUInteger)erosionRadius;
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageRGBFilter : GPUImageFilter
4 | {
5 | GLint redUniform;
6 | GLint greenUniform;
7 | GLint blueUniform;
8 | }
9 |
10 | // Normalized values by which each color channel is multiplied. The range is from 0.0 up, with 1.0 as the default.
11 | @property (readwrite, nonatomic) CGFloat red;
12 | @property (readwrite, nonatomic) CGFloat green;
13 | @property (readwrite, nonatomic) CGFloat blue;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBOpeningFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageRGBErosionFilter;
4 | @class GPUImageRGBDilationFilter;
5 |
6 | // A filter that first performs an erosion on each color channel of an image, followed by a dilation of the same radius.
7 | // This helps to filter out smaller bright elements.
8 |
9 | @interface GPUImageRGBOpeningFilter : GPUImageFilterGroup
10 | {
11 | GPUImageRGBErosionFilter *erosionFilter;
12 | GPUImageRGBDilationFilter *dilationFilter;
13 | }
14 |
15 | - (id)initWithRadius:(NSUInteger)radius;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRGBOpeningFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageRGBOpeningFilter.h"
2 | #import "GPUImageRGBErosionFilter.h"
3 | #import "GPUImageRGBDilationFilter.h"
4 |
5 | @implementation GPUImageRGBOpeningFilter
6 |
7 | - (id)init;
8 | {
9 | if (!(self = [self initWithRadius:1]))
10 | {
11 | return nil;
12 | }
13 |
14 | return self;
15 | }
16 |
17 | - (id)initWithRadius:(NSUInteger)radius;
18 | {
19 | if (!(self = [super init]))
20 | {
21 | return nil;
22 | }
23 |
24 | // First pass: erosion
25 | erosionFilter = [[GPUImageRGBErosionFilter alloc] initWithRadius:radius];
26 | [self addFilter:erosionFilter];
27 |
28 | // Second pass: dilation
29 | dilationFilter = [[GPUImageRGBDilationFilter alloc] initWithRadius:radius];
30 | [self addFilter:dilationFilter];
31 |
32 | [erosionFilter addTarget:dilationFilter];
33 |
34 | self.initialFilters = [NSArray arrayWithObjects:erosionFilter, nil];
35 | self.terminalFilter = dilationFilter;
36 |
37 | return self;
38 | }
39 |
40 |
41 | @end
42 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRawDataInput.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageOutput.h"
2 |
3 | // The bytes passed into this input are not copied or retained, but you are free to deallocate them after they are used by this filter.
4 | // The bytes are uploaded and stored within a texture, so nothing is kept locally.
5 | // The default format for input bytes is GPUPixelFormatBGRA, unless specified with pixelFormat:
6 | // The default type for input bytes is GPUPixelTypeUByte, unless specified with pixelType:
7 |
8 | typedef enum {
9 | GPUPixelFormatBGRA = GL_BGRA,
10 | GPUPixelFormatRGBA = GL_RGBA,
11 | GPUPixelFormatRGB = GL_RGB,
12 | GPUPixelFormatLuminance = GL_LUMINANCE
13 | } GPUPixelFormat;
14 |
15 | typedef enum {
16 | GPUPixelTypeUByte = GL_UNSIGNED_BYTE,
17 | GPUPixelTypeFloat = GL_FLOAT
18 | } GPUPixelType;
19 |
20 | @interface GPUImageRawDataInput : GPUImageOutput
21 | {
22 | CGSize uploadedImageSize;
23 |
24 | dispatch_semaphore_t dataUpdateSemaphore;
25 | }
26 |
27 | // Initialization and teardown
28 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
29 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat;
30 | - (id)initWithBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize pixelFormat:(GPUPixelFormat)pixelFormat type:(GPUPixelType)pixelType;
31 |
32 | /** Input data pixel format
33 | */
34 | @property (readwrite, nonatomic) GPUPixelFormat pixelFormat;
35 | @property (readwrite, nonatomic) GPUPixelType pixelType;
36 |
37 | // Image rendering
38 | - (void)updateDataFromBytes:(GLubyte *)bytesToUpload size:(CGSize)imageSize;
39 | - (void)processData;
40 | - (void)processDataForTimestamp:(CMTime)frameTime;
41 | - (CGSize)outputImageSize;
42 |
43 | @end
44 |
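
A minimal sketch of pushing raw BGRA bytes through the pipeline (the buffer size, its contents, and the downstream someFilter are assumptions):

// 128x128 BGRA buffer, filled elsewhere
GLubyte *rawBytes = (GLubyte *)calloc(128 * 128 * 4, sizeof(GLubyte));
GPUImageRawDataInput *rawDataInput = [[GPUImageRawDataInput alloc] initWithBytes:rawBytes
                                                                            size:CGSizeMake(128.0, 128.0)
                                                                     pixelFormat:GPUPixelFormatBGRA];

[rawDataInput addTarget:someFilter];
[rawDataInput processData];     // upload the current bytes and propagate a frame

// After rewriting the buffer in place, push another frame:
[rawDataInput updateDataFromBytes:rawBytes size:CGSizeMake(128.0, 128.0)];
[rawDataInput processData];

free(rawBytes);                 // safe once the bytes have been uploaded, per the notes above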
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageRawDataOutput.h:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 | #import "GPUImageContext.h"
3 |
4 | struct GPUByteColorVector {
5 | GLubyte red;
6 | GLubyte green;
7 | GLubyte blue;
8 | GLubyte alpha;
9 | };
10 | typedef struct GPUByteColorVector GPUByteColorVector;
11 |
12 | @protocol GPUImageRawDataProcessor;
13 |
14 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
15 | @interface GPUImageRawDataOutput : NSObject <GPUImageInput> {
16 | CGSize imageSize;
17 | GPUImageRotationMode inputRotation;
18 | BOOL outputBGRA;
19 | }
20 | #else
21 | @interface GPUImageRawDataOutput : NSObject <GPUImageInput> {
22 | CGSize imageSize;
23 | GPUImageRotationMode inputRotation;
24 | BOOL outputBGRA;
25 | }
26 | #endif
27 |
28 | @property(readonly) GLubyte *rawBytesForImage;
29 | @property(nonatomic, copy) void(^newFrameAvailableBlock)(void);
30 | @property(nonatomic) BOOL enabled;
31 |
32 | // Initialization and teardown
33 | - (id)initWithImageSize:(CGSize)newImageSize resultsInBGRAFormat:(BOOL)resultsInBGRAFormat;
34 |
35 | // Data access
36 | - (GPUByteColorVector)colorAtLocation:(CGPoint)locationInImage;
37 | - (NSUInteger)bytesPerRowInOutput;
38 |
39 | - (void)setImageSize:(CGSize)newImageSize;
40 |
41 | - (void)lockFramebufferForReading;
42 | - (void)unlockFramebufferAfterReading;
43 |
44 | @end
45 |
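
A sketch of reading processed pixels back on the CPU (someFilter is an assumed upstream filter); access to rawBytesForImage is bracketed by the lock/unlock pair declared above.

GPUImageRawDataOutput *rawDataOutput = [[GPUImageRawDataOutput alloc] initWithImageSize:CGSizeMake(640.0, 480.0) resultsInBGRAFormat:YES];
[someFilter addTarget:rawDataOutput];

__weak GPUImageRawDataOutput *weakOutput = rawDataOutput;
[rawDataOutput setNewFrameAvailableBlock:^{
    [weakOutput lockFramebufferForReading];
    GLubyte *outputBytes = [weakOutput rawBytesForImage];
    NSUInteger bytesPerRow = [weakOutput bytesPerRowInOutput];
    // ... read BGRA pixels from outputBytes, row stride = bytesPerRow ...
    [weakOutput unlockFramebufferAfterReading];
}];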
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSaturationBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageSaturationBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSaturationFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Adjusts the saturation of an image
4 | */
5 | @interface GPUImageSaturationFilter : GPUImageFilter
6 | {
7 | GLint saturationUniform;
8 | }
9 |
10 | /** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 1.0 as the normal level
11 | */
12 | @property(readwrite, nonatomic) CGFloat saturation;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageScreenBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageScreenBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageScreenBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageScreenBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | mediump vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | mediump vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 | mediump vec4 whiteColor = vec4(1.0);
17 | gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor));
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageScreenBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
33 | vec4 whiteColor = vec4(1.0);
34 | gl_FragColor = whiteColor - ((whiteColor - textureColor2) * (whiteColor - textureColor));
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageScreenBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageScreenBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSepiaFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageColorMatrixFilter.h"
2 |
3 | /// Simple sepia tone filter
4 | @interface GPUImageSepiaFilter : GPUImageColorMatrixFilter
5 |
6 | @end
7 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSepiaFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageSepiaFilter.h"
2 |
3 | @implementation GPUImageSepiaFilter
4 |
5 | - (id)init;
6 | {
7 | if (!(self = [super init]))
8 | {
9 | return nil;
10 | }
11 |
12 | self.intensity = 1.0;
13 | self.colorMatrix = (GPUMatrix4x4){
14 | {0.3588, 0.7044, 0.1368, 0.0},
15 | {0.2990, 0.5870, 0.1140, 0.0},
16 | {0.2392, 0.4696, 0.0912 ,0.0},
17 | {0,0,0,1.0},
18 | };
19 |
20 | return self;
21 | }
22 |
23 | @end
24 |
25 |
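Since the sepia effect is just a fixed color matrix applied at full intensity, a similar look can be produced or tweaked by configuring the parent filter directly. A hedged sketch (not from the repository), assuming colorMatrix and intensity are public properties on GPUImageColorMatrixFilter, as their use in -init above suggests:

    // Illustrative: a weaker sepia by blending the matrix result 70% with the original colors.
    GPUImageColorMatrixFilter *tint = [[GPUImageColorMatrixFilter alloc] init];
    tint.intensity = 0.7;
    tint.colorMatrix = (GPUMatrix4x4){
        {0.3588, 0.7044, 0.1368, 0.0},
        {0.2990, 0.5870, 0.1140, 0.0},
        {0.2392, 0.4696, 0.0912, 0.0},
        {0.0,    0.0,    0.0,    1.0},
    };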
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSharpenFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageSharpenFilter : GPUImageFilter
4 | {
5 | GLint sharpnessUniform;
6 | GLint imageWidthFactorUniform, imageHeightFactorUniform;
7 | }
8 |
9 | // Sharpness ranges from -4.0 to 4.0, with 0.0 as the normal level
10 | @property(readwrite, nonatomic) CGFloat sharpness;
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageShiTomasiFeatureDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageHarrisCornerDetectionFilter.h"
2 |
3 | /** Shi-Tomasi feature detector
4 |
5 | This is the Shi-Tomasi feature detector, as described in
6 | J. Shi and C. Tomasi. Good features to track. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 593-600, June 1994.
7 | */
8 |
9 | @interface GPUImageShiTomasiFeatureDetectionFilter : GPUImageHarrisCornerDetectionFilter
10 |
11 | // Compared to the Harris corner detector, the default sensitivity value for this detector is set to 1.5
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSingleComponentGaussianBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageGaussianBlurFilter.h"
2 |
3 | // This filter merely performs the standard Gaussian blur on the red color channel (assuming a luminance image)
4 |
5 | @interface GPUImageSingleComponentGaussianBlurFilter : GPUImageGaussianBlurFilter
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSketchFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageSobelEdgeDetectionFilter.h"
2 |
3 | /** Converts video to look like a sketch.
4 |
5 | This is just the Sobel edge detection filter with the colors inverted.
6 | */
7 | @interface GPUImageSketchFilter : GPUImageSobelEdgeDetectionFilter
8 | {
9 | }
10 |
11 | @end
12 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSkinToneFilter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageSkinToneFilter.h
3 | //
4 | //
5 | // Created by github.com/r3mus on 8/14/15.
6 | //
7 | //
8 |
9 | #import "GPUImageTwoInputFilter.h"
10 |
11 | typedef NS_ENUM(NSUInteger, GPUImageSkinToneUpperColor) {
12 | GPUImageSkinToneUpperColorGreen,
13 | GPUImageSkinToneUpperColorOrange
14 | };
15 |
16 | extern NSString *const kGPUImageSkinToneFragmentShaderString;
17 |
18 | @interface GPUImageSkinToneFilter : GPUImageFilter
19 | {
20 | GLint skinToneAdjustUniform;
21 | GLint skinHueUniform;
22 | GLint skinHueThresholdUniform;
23 | GLint maxHueShiftUniform;
24 | GLint maxSaturationShiftUniform;
25 | GLint upperSkinToneColorUniform;
26 | }
27 |
28 | // The amount of effect to apply, between -1.0 (pink) and +1.0 (orange OR green). Default is 0.0.
29 | @property (nonatomic, readwrite) CGFloat skinToneAdjust;
30 |
31 | // The initial hue of skin to adjust. Default is 0.05 (a common skin red).
32 | @property (nonatomic, readwrite) CGFloat skinHue;
33 |
34 | // The bell curve "breadth" of the skin hue adjustment (i.e. how far from the original skinHue the adjustment will reach).
35 | // Default is 40.0
36 | @property (nonatomic, readwrite) CGFloat skinHueThreshold;
37 |
38 | // The maximum amount of hue shift allowed in the adjustments that affect hue (pink, green). Default = 0.25.
39 | @property (nonatomic, readwrite) CGFloat maxHueShift;
40 |
41 | // The maximum amount of saturation shift allowed in the adjustments that affect saturation (orange). Default = 0.4.
42 | @property (nonatomic, readwrite) CGFloat maxSaturationShift;
43 |
44 | // Defines whether the upper range (> 0.0) will change the skin tone to green (hue) or orange (saturation)
45 | @property (nonatomic, readwrite) GPUImageSkinToneUpperColor upperSkinToneColor;
46 |
47 | @end
48 |
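To make the parameter ranges above concrete, here is an illustrative configuration (not from the repository) that nudges skin tones slightly toward orange while leaving other hues alone; the defaults quoted in the comments are assumed to be applied in -init.

    // Illustrative: warm skin tones a little, with the shift capped.
    GPUImageSkinToneFilter *skinTone = [[GPUImageSkinToneFilter alloc] init];
    skinTone.skinToneAdjust     = 0.3;                              // > 0 shifts toward the "upper" color
    skinTone.skinHue            = 0.05;                             // typical skin red
    skinTone.skinHueThreshold   = 40.0;                             // breadth of the affected hue band
    skinTone.maxSaturationShift = 0.25;                             // cap the orange (saturation) shift
    skinTone.upperSkinToneColor = GPUImageSkinToneUpperColorOrange; // upper range adjusts saturation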
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSmoothToonFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageGaussianBlurFilter;
4 | @class GPUImageToonFilter;
5 |
6 | /** This uses a similar process as the GPUImageToonFilter, only it precedes the toon effect with a Gaussian blur to smooth out noise.
7 | */
8 | @interface GPUImageSmoothToonFilter : GPUImageFilterGroup
9 | {
10 | GPUImageGaussianBlurFilter *blurFilter;
11 | GPUImageToonFilter *toonFilter;
12 | }
13 |
14 | /// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels
15 | @property(readwrite, nonatomic) CGFloat texelWidth;
16 | /// The image width and height factors tweak the appearance of the edges. By default, they match the filter size in pixels
17 | @property(readwrite, nonatomic) CGFloat texelHeight;
18 |
19 | /// The radius of the underlying Gaussian blur. The default is 2.0.
20 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels;
21 |
22 | /// The threshold at which to apply the edges, default of 0.2
23 | @property(readwrite, nonatomic) CGFloat threshold;
24 |
25 | /// The levels of quantization for the posterization of colors within the scene, with a default of 10.0
26 | @property(readwrite, nonatomic) CGFloat quantizationLevels;
27 |
28 | @end
29 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSobelEdgeDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassFilter.h"
2 |
3 | @interface GPUImageSobelEdgeDetectionFilter : GPUImageTwoPassFilter
4 | {
5 | GLint texelWidthUniform, texelHeightUniform, edgeStrengthUniform;
6 | BOOL hasOverriddenImageSizeFactor;
7 | }
8 |
9 | // The texel width and height factors tweak the appearance of the edges. By default, they match the inverse of the filter size in pixels
10 | @property(readwrite, nonatomic) CGFloat texelWidth;
11 | @property(readwrite, nonatomic) CGFloat texelHeight;
12 |
13 | // The filter strength property affects the dynamic range of the filter. High values can make edges more visible, but can lead to saturation. Default of 1.0.
14 | @property(readwrite, nonatomic) CGFloat edgeStrength;
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSoftEleganceFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImagePicture;
4 |
5 | /** A photo filter based on Soft Elegance Photoshop action
6 | http://h-d-stock.deviantart.com/art/H-D-A-soft-elegance-70107603
7 | */
8 |
9 | // Note: If you want to use this effect you have to add
10 | // lookup_soft_elegance_1.png and lookup_soft_elegance_2.png
11 | // from Resources folder to your application bundle.
12 |
13 | @interface GPUImageSoftEleganceFilter : GPUImageFilterGroup
14 | {
15 | GPUImagePicture *lookupImageSource1;
16 | GPUImagePicture *lookupImageSource2;
17 | }
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSoftLightBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageSoftLightBlendFilter : GPUImageTwoInputFilter
4 | {
5 | }
6 |
7 | @end
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSolarizeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Pixels with a luminance above the threshold will invert their color
4 | */
5 | @interface GPUImageSolarizeFilter : GPUImageFilter
6 | {
7 | GLint thresholdUniform;
8 | }
9 |
10 | /** Anything above this luminance will be inverted, and anything below will remain normal. Ranges from 0.0 to 1.0, with 0.5 as the default
11 | */
12 | @property(readwrite, nonatomic) CGFloat threshold;
13 |
14 | @end
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSolidColorGenerator.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | // This outputs an image with a constant color. You need to use -forceProcessingAtSize: in order to set the output image
4 | // dimensions, or this won't work correctly
5 |
6 |
7 | @interface GPUImageSolidColorGenerator : GPUImageFilter
8 | {
9 | GLint colorUniform;
10 | GLint useExistingAlphaUniform;
11 | }
12 |
13 | // This color dictates what the output image will be filled with
14 | @property(readwrite, nonatomic) GPUVector4 color;
15 | @property(readwrite, nonatomic, assign) BOOL useExistingAlpha; // whether to use the alpha of the existing image or not, default is NO
16 |
17 | - (void)setColorRed:(CGFloat)redComponent green:(CGFloat)greenComponent blue:(CGFloat)blueComponent alpha:(CGFloat)alphaComponent;
18 |
19 | @end
20 |
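Because this generator has no upstream image to infer a size from, the header's note about -forceProcessingAtSize: matters in practice. A minimal sketch (not part of the repository), assuming -forceProcessingAtSize: from GPUImageOutput:

    // Illustrative: produce a 256x256 semi-transparent red fill.
    GPUImageSolidColorGenerator *fill = [[GPUImageSolidColorGenerator alloc] init];
    [fill forceProcessingAtSize:CGSizeMake(256.0, 256.0)];  // required: the generator has no input size
    [fill setColorRed:1.0 green:0.0 blue:0.0 alpha:0.5];
    // The generator can now feed a blend filter or any other GPUImageInput target.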
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSourceOverBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageSourceOverBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSourceOverBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageSourceOverBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate);
16 |
17 | gl_FragColor = mix(textureColor, textureColor2, textureColor2.a);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageSourceOverBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate);
33 |
34 | gl_FragColor = mix(textureColor, textureColor2, textureColor2.a);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageSourceOverBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSourceOverBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
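The shader mixes the two inputs by the second input's alpha, so the second source added becomes the overlay. An illustrative two-input setup (not from the repository), assuming the usual GPUImagePicture / addTarget: / imageFromCurrentFramebuffer pattern and two provided UIImages:

    // Illustrative: composite an overlay (with alpha) over a base image.
    GPUImagePicture *base    = [[GPUImagePicture alloc] initWithImage:baseImage];    // assumed UIImage
    GPUImagePicture *overlay = [[GPUImagePicture alloc] initWithImage:overlayImage]; // assumed UIImage with alpha
    GPUImageSourceOverBlendFilter *blend = [[GPUImageSourceOverBlendFilter alloc] init];

    [base addTarget:blend];      // first input slot: inputImageTexture
    [overlay addTarget:blend];   // second input slot: inputImageTexture2, mixed in by its alpha

    [blend useNextFrameForImageCapture];
    [base processImage];
    [overlay processImage];
    UIImage *composited = [blend imageFromCurrentFramebuffer];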
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSphereRefractionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageSphereRefractionFilter : GPUImageFilter
4 | {
5 | GLint radiusUniform, centerUniform, aspectRatioUniform, refractiveIndexUniform;
6 | }
7 |
8 | /// The center about which to apply the distortion, with a default of (0.5, 0.5)
9 | @property(readwrite, nonatomic) CGPoint center;
10 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.25
11 | @property(readwrite, nonatomic) CGFloat radius;
12 | /// The index of refraction for the sphere, with a default of 0.71
13 | @property(readwrite, nonatomic) CGFloat refractiveIndex;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageStretchDistortionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Creates a stretch distortion of the image
4 | */
5 | @interface GPUImageStretchDistortionFilter : GPUImageFilter {
6 | GLint centerUniform;
7 | }
8 |
9 | /** The center about which to apply the distortion, with a default of (0.5, 0.5)
10 | */
11 | @property(readwrite, nonatomic) CGPoint center;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSubtractBlendFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageSubtractBlendFilter : GPUImageTwoInputFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSubtractBlendFilter.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageSubtractBlendFilter.h"
2 |
3 | #if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
4 | NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING
5 | (
6 | varying highp vec2 textureCoordinate;
7 | varying highp vec2 textureCoordinate2;
8 |
9 | uniform sampler2D inputImageTexture;
10 | uniform sampler2D inputImageTexture2;
11 |
12 | void main()
13 | {
14 | lowp vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
15 | lowp vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
16 |
17 | gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a);
18 | }
19 | );
20 | #else
21 | NSString *const kGPUImageSubtractBlendFragmentShaderString = SHADER_STRING
22 | (
23 | varying vec2 textureCoordinate;
24 | varying vec2 textureCoordinate2;
25 |
26 | uniform sampler2D inputImageTexture;
27 | uniform sampler2D inputImageTexture2;
28 |
29 | void main()
30 | {
31 | vec4 textureColor = texture2D(inputImageTexture, textureCoordinate);
32 | vec4 textureColor2 = texture2D(inputImageTexture2, textureCoordinate2);
33 |
34 | gl_FragColor = vec4(textureColor.rgb - textureColor2.rgb, textureColor.a);
35 | }
36 | );
37 | #endif
38 |
39 | @implementation GPUImageSubtractBlendFilter
40 |
41 | - (id)init;
42 | {
43 | if (!(self = [super initWithFragmentShaderFromString:kGPUImageSubtractBlendFragmentShaderString]))
44 | {
45 | return nil;
46 | }
47 |
48 | return self;
49 | }
50 |
51 | @end
52 |
53 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageSwirlFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Creates a swirl distortion on the image
4 | */
5 | @interface GPUImageSwirlFilter : GPUImageFilter
6 | {
7 | GLint radiusUniform, centerUniform, angleUniform;
8 | }
9 |
10 | /// The center about which to apply the distortion, with a default of (0.5, 0.5)
11 | @property(readwrite, nonatomic) CGPoint center;
12 | /// The radius of the distortion, ranging from 0.0 to 1.0, with a default of 0.5
13 | @property(readwrite, nonatomic) CGFloat radius;
14 | /// The amount of distortion to apply, with a minimum of 0.0 and a default of 1.0
15 | @property(readwrite, nonatomic) CGFloat angle;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTextureInput.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageOutput.h"
2 |
3 | @interface GPUImageTextureInput : GPUImageOutput
4 | {
5 | CGSize textureSize;
6 | }
7 |
8 | // Initialization and teardown
9 | - (id)initWithTexture:(GLuint)newInputTexture size:(CGSize)newTextureSize;
10 |
11 | // Image rendering
12 | - (void)processTextureWithFrameTime:(CMTime)frameTime;
13 |
14 | @end
15 |
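This class lets an externally created OpenGL ES texture enter a filter chain. A hedged sketch (not from the repository), assuming the texture was created on a context shared with GPUImage and that addTarget: behaves as elsewhere in the framework; existingTextureID and someFilter are hypothetical names.

    // Illustrative: wrap an existing GL texture and push one frame through a filter.
    GPUImageTextureInput *textureInput =
        [[GPUImageTextureInput alloc] initWithTexture:existingTextureID        // assumed GLuint you own
                                                 size:CGSizeMake(1280.0, 720.0)];
    [textureInput addTarget:someFilter];                                       // someFilter: assumed GPUImageFilter
    [textureInput processTextureWithFrameTime:kCMTimeZero];                    // timestamp for downstream targets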
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTextureInput.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageTextureInput.h"
2 |
3 | @implementation GPUImageTextureInput
4 |
5 | #pragma mark -
6 | #pragma mark Initialization and teardown
7 |
8 | - (id)initWithTexture:(GLuint)newInputTexture size:(CGSize)newTextureSize;
9 | {
10 | if (!(self = [super init]))
11 | {
12 | return nil;
13 | }
14 |
15 | runSynchronouslyOnVideoProcessingQueue(^{
16 | [GPUImageContext useImageProcessingContext];
17 | });
18 |
19 | textureSize = newTextureSize;
20 |
21 | runSynchronouslyOnVideoProcessingQueue(^{
22 | outputFramebuffer = [[GPUImageFramebuffer alloc] initWithSize:newTextureSize overriddenTexture:newInputTexture];
23 | });
24 |
25 | return self;
26 | }
27 |
28 | #pragma mark -
29 | #pragma mark Image rendering
30 |
31 | - (void)processTextureWithFrameTime:(CMTime)frameTime;
32 | {
33 | runAsynchronouslyOnVideoProcessingQueue(^{
34 | for (id currentTarget in targets)
35 | {
36 | NSInteger indexOfObject = [targets indexOfObject:currentTarget];
37 | NSInteger targetTextureIndex = [[targetTextureIndices objectAtIndex:indexOfObject] integerValue];
38 |
39 | [currentTarget setInputSize:textureSize atIndex:targetTextureIndex];
40 | [currentTarget setInputFramebuffer:outputFramebuffer atIndex:targetTextureIndex];
41 | [currentTarget newFrameReadyAtTime:frameTime atIndex:targetTextureIndex];
42 | }
43 | });
44 | }
45 |
46 | @end
47 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTextureOutput.h:
--------------------------------------------------------------------------------
1 | #import <Foundation/Foundation.h>
2 | #import "GPUImageContext.h"
3 |
4 | @protocol GPUImageTextureOutputDelegate;
5 |
6 | @interface GPUImageTextureOutput : NSObject <GPUImageInput>
7 | {
8 | GPUImageFramebuffer *firstInputFramebuffer;
9 | }
10 |
11 | @property(readwrite, unsafe_unretained, nonatomic) id<GPUImageTextureOutputDelegate> delegate;
12 | @property(readonly) GLuint texture;
13 | @property(nonatomic) BOOL enabled;
14 |
15 | - (void)doneWithTexture;
16 |
17 | @end
18 |
19 | @protocol GPUImageTextureOutputDelegate
20 | - (void)newFrameReadyFromTextureOutput:(GPUImageTextureOutput *)callbackTextureOutput;
21 | @end
22 |
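At the other end of a chain, this class hands the rendered texture back to your own GL code via the delegate. An illustrative delegate implementation (not from the repository); the class name MyTextureConsumer is hypothetical, and the call to -doneWithTexture follows the unlocking behaviour of the implementation that follows.

    // Illustrative: receive rendered frames as GL textures and release them when done.
    @interface MyTextureConsumer : NSObject <GPUImageTextureOutputDelegate>  // hypothetical consumer class
    @end

    @implementation MyTextureConsumer
    - (void)newFrameReadyFromTextureOutput:(GPUImageTextureOutput *)callbackTextureOutput
    {
        GLuint textureID = callbackTextureOutput.texture;  // draw with this texture in your own GL code
        // ... render using textureID ...
        [callbackTextureOutput doneWithTexture];           // unlocks the framebuffer for reuse
    }
    @end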
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTextureOutput.m:
--------------------------------------------------------------------------------
1 | #import "GPUImageTextureOutput.h"
2 |
3 | @implementation GPUImageTextureOutput
4 |
5 | @synthesize delegate = _delegate;
6 | @synthesize texture = _texture;
7 | @synthesize enabled;
8 |
9 | #pragma mark -
10 | #pragma mark Initialization and teardown
11 |
12 | - (id)init;
13 | {
14 | if (!(self = [super init]))
15 | {
16 | return nil;
17 | }
18 |
19 | self.enabled = YES;
20 |
21 | return self;
22 | }
23 |
24 | - (void)doneWithTexture;
25 | {
26 | [firstInputFramebuffer unlock];
27 | }
28 |
29 | #pragma mark -
30 | #pragma mark GPUImageInput protocol
31 |
32 | - (void)newFrameReadyAtTime:(CMTime)frameTime atIndex:(NSInteger)textureIndex;
33 | {
34 | [_delegate newFrameReadyFromTextureOutput:self];
35 | }
36 |
37 | - (NSInteger)nextAvailableTextureIndex;
38 | {
39 | return 0;
40 | }
41 |
42 | // TODO: Deal with the fact that the texture changes regularly as a result of the caching
43 | - (void)setInputFramebuffer:(GPUImageFramebuffer *)newInputFramebuffer atIndex:(NSInteger)textureIndex;
44 | {
45 | firstInputFramebuffer = newInputFramebuffer;
46 | [firstInputFramebuffer lock];
47 |
48 | _texture = [firstInputFramebuffer texture];
49 | }
50 |
51 | - (void)setInputRotation:(GPUImageRotationMode)newInputRotation atIndex:(NSInteger)textureIndex;
52 | {
53 | }
54 |
55 | - (void)setInputSize:(CGSize)newSize atIndex:(NSInteger)textureIndex;
56 | {
57 | }
58 |
59 | - (CGSize)maximumOutputSize;
60 | {
61 | return CGSizeZero;
62 | }
63 |
64 | - (void)endProcessing
65 | {
66 | }
67 |
68 | - (BOOL)shouldIgnoreUpdatesToThisTarget;
69 | {
70 | return NO;
71 | }
72 |
73 | - (BOOL)wantsMonochromeInput;
74 | {
75 | return NO;
76 | }
77 |
78 | - (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue;
79 | {
80 |
81 | }
82 |
83 | @end
84 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageThreeInputFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | extern NSString *const kGPUImageThreeInputTextureVertexShaderString;
4 |
5 | @interface GPUImageThreeInputFilter : GPUImageTwoInputFilter
6 | {
7 | GPUImageFramebuffer *thirdInputFramebuffer;
8 |
9 | GLint filterThirdTextureCoordinateAttribute;
10 | GLint filterInputTextureUniform3;
11 | GPUImageRotationMode inputRotation3;
12 | GLuint filterSourceTexture3;
13 | CMTime thirdFrameTime;
14 |
15 | BOOL hasSetSecondTexture, hasReceivedThirdFrame, thirdFrameWasVideo;
16 | BOOL thirdFrameCheckDisabled;
17 | }
18 |
19 | - (void)disableThirdFrameCheck;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageThresholdEdgeDetectionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageSobelEdgeDetectionFilter.h"
2 |
3 | @interface GPUImageThresholdEdgeDetectionFilter : GPUImageSobelEdgeDetectionFilter
4 | {
5 | GLint thresholdUniform;
6 | }
7 |
8 | /** Any edge above this threshold will be black, and anything below white. Ranges from 0.0 to 1.0, with 0.8 as the default
9 | */
10 | @property(readwrite, nonatomic) CGFloat threshold;
11 |
12 | @end
13 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageThresholdSketchFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageThresholdEdgeDetectionFilter.h"
2 |
3 | @interface GPUImageThresholdSketchFilter : GPUImageThresholdEdgeDetectionFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageThresholdedNonMaximumSuppressionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageThresholdedNonMaximumSuppressionFilter : GPUImage3x3TextureSamplingFilter
4 | {
5 | GLint thresholdUniform;
6 | }
7 |
8 | /** Any local maximum above this threshold will be white, and anything below black. Ranges from 0.0 to 1.0, with 0.8 as the default
9 | */
10 | @property(readwrite, nonatomic) CGFloat threshold;
11 |
12 | - (id)initWithPackedColorspace:(BOOL)inputUsesPackedColorspace;
13 |
14 | @end
15 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTiltShiftFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageGaussianBlurFilter;
4 |
5 | /// A simulated tilt shift lens effect
6 | @interface GPUImageTiltShiftFilter : GPUImageFilterGroup
7 | {
8 | GPUImageGaussianBlurFilter *blurFilter;
9 | GPUImageFilter *tiltShiftFilter;
10 | }
11 |
12 | /// The radius of the underlying blur, in pixels. This is 7.0 by default.
13 | @property(readwrite, nonatomic) CGFloat blurRadiusInPixels;
14 |
15 | /// The normalized location of the top of the in-focus area in the image; this value should be lower than bottomFocusLevel. Default is 0.4
16 | @property(readwrite, nonatomic) CGFloat topFocusLevel;
17 |
18 | /// The normalized location of the bottom of the in-focus area in the image; this value should be higher than topFocusLevel. Default is 0.6
19 | @property(readwrite, nonatomic) CGFloat bottomFocusLevel;
20 |
21 | /// The rate at which the image gets blurry away from the in-focus region, default 0.2
22 | @property(readwrite, nonatomic) CGFloat focusFallOffRate;
23 |
24 | @end
25 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageToneCurveFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageToneCurveFilter : GPUImageFilter
4 |
5 | @property(readwrite, nonatomic, copy) NSArray *redControlPoints;
6 | @property(readwrite, nonatomic, copy) NSArray *greenControlPoints;
7 | @property(readwrite, nonatomic, copy) NSArray *blueControlPoints;
8 | @property(readwrite, nonatomic, copy) NSArray *rgbCompositeControlPoints;
9 |
10 | // Initialization and teardown
11 | - (id)initWithACVData:(NSData*)data;
12 |
13 | - (id)initWithACV:(NSString*)curveFilename;
14 | - (id)initWithACVURL:(NSURL*)curveFileURL;
15 |
16 | // This lets you set all three red, green, and blue tone curves at once.
17 | // NOTE: Deprecated this function because this effect can be accomplished
18 | // using the rgbComposite channel rather than setting all 3 R, G, and B channels.
19 | - (void)setRGBControlPoints:(NSArray *)points DEPRECATED_ATTRIBUTE;
20 |
21 | - (void)setPointsWithACV:(NSString*)curveFilename;
22 | - (void)setPointsWithACVURL:(NSURL*)curveFileURL;
23 |
24 | // Curve calculation
25 | - (NSMutableArray *)getPreparedSplineCurve:(NSArray *)points;
26 | - (NSMutableArray *)splineCurve:(NSArray *)points;
27 | - (NSMutableArray *)secondDerivative:(NSArray *)cgPoints;
28 | - (void)updateToneCurveTexture;
29 |
30 | @end
31 |
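Control points are normalized (0–1) curve coordinates in Photoshop style, and the ACV initializers load the same data from a .acv file. A hedged sketch (not from the repository), assuming iOS-style NSValue-wrapped CGPoints for the control-point arrays; the curve filename is hypothetical.

    // Illustrative: lift the shadows and compress the highlights on the composite RGB curve.
    GPUImageToneCurveFilter *toneCurve = [[GPUImageToneCurveFilter alloc] init];
    toneCurve.rgbCompositeControlPoints = @[
        [NSValue valueWithCGPoint:CGPointMake(0.0, 0.05)],   // raise the black point slightly
        [NSValue valueWithCGPoint:CGPointMake(0.5, 0.55)],
        [NSValue valueWithCGPoint:CGPointMake(1.0, 0.95)],   // pull down the white point
    ];

    // Or load a Photoshop curve bundled with the app (filename is hypothetical):
    GPUImageToneCurveFilter *acvCurve = [[GPUImageToneCurveFilter alloc] initWithACV:@"my_curve"];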
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageToonFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | /** This uses Sobel edge detection to place a black border around objects,
4 | and then it quantizes the colors present in the image to give a cartoon-like quality to the image.
5 | */
6 | @interface GPUImageToonFilter : GPUImage3x3TextureSamplingFilter
7 | {
8 | GLint thresholdUniform, quantizationLevelsUniform;
9 | }
10 |
11 | /** The threshold at which to apply the edges, default of 0.2
12 | */
13 | @property(readwrite, nonatomic) CGFloat threshold;
14 |
15 | /** The levels of quantization for the posterization of colors within the scene, with a default of 10.0
16 | */
17 | @property(readwrite, nonatomic) CGFloat quantizationLevels;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTransformFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageTransformFilter : GPUImageFilter
4 | {
5 | GLint transformMatrixUniform, orthographicMatrixUniform;
6 | GPUMatrix4x4 orthographicMatrix;
7 | }
8 |
9 | // You can either set the transform to apply to be a 2-D affine transform or a 3-D transform. The default is the identity transform (the output image is identical to the input).
10 | @property(readwrite, nonatomic) CGAffineTransform affineTransform;
11 | @property(readwrite, nonatomic) CATransform3D transform3D;
12 |
13 | // This applies the transform to the raw frame data if set to YES; the default of NO takes the aspect ratio of the image input into account when rotating
14 | @property(readwrite, nonatomic) BOOL ignoreAspectRatio;
15 |
16 | // sets the anchor point to top left corner
17 | @property(readwrite, nonatomic) BOOL anchorTopLeft;
18 |
19 | @end
20 |
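A short sketch (not from the repository) of the affine path described above; CGAffineTransformMakeRotation comes from CoreGraphics, and the properties are the ones declared in this header.

    // Illustrative: rotate the image 45 degrees about its center, respecting the aspect ratio.
    GPUImageTransformFilter *transformFilter = [[GPUImageTransformFilter alloc] init];
    transformFilter.affineTransform = CGAffineTransformMakeRotation(M_PI_4);
    transformFilter.ignoreAspectRatio = NO;  // default: rotation accounts for the input's aspect ratio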
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTwoInputCrossTextureSamplingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageTwoInputCrossTextureSamplingFilter : GPUImageTwoInputFilter
4 | {
5 | GLint texelWidthUniform, texelHeightUniform;
6 |
7 | CGFloat texelWidth, texelHeight;
8 | BOOL hasOverriddenImageSizeFactor;
9 | }
10 |
11 | // The texel width and height determine how far out to sample from this texel. By default, this is the normalized width of a pixel, but this can be overridden for different effects.
12 | @property(readwrite, nonatomic) CGFloat texelWidth;
13 | @property(readwrite, nonatomic) CGFloat texelHeight;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTwoInputFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | extern NSString *const kGPUImageTwoInputTextureVertexShaderString;
4 |
5 | @interface GPUImageTwoInputFilter : GPUImageFilter
6 | {
7 | GPUImageFramebuffer *secondInputFramebuffer;
8 |
9 | GLint filterSecondTextureCoordinateAttribute;
10 | GLint filterInputTextureUniform2;
11 | GPUImageRotationMode inputRotation2;
12 | CMTime firstFrameTime, secondFrameTime;
13 |
14 | BOOL hasSetFirstTexture, hasReceivedFirstFrame, hasReceivedSecondFrame, firstFrameWasVideo, secondFrameWasVideo;
15 | BOOL firstFrameCheckDisabled, secondFrameCheckDisabled;
16 | }
17 |
18 | - (void)disableFirstFrameCheck;
19 | - (void)disableSecondFrameCheck;
20 |
21 | @end
22 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTwoPassFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageTwoPassFilter : GPUImageFilter
4 | {
5 | GPUImageFramebuffer *secondOutputFramebuffer;
6 |
7 | GLProgram *secondFilterProgram;
8 | GLint secondFilterPositionAttribute, secondFilterTextureCoordinateAttribute;
9 | GLint secondFilterInputTextureUniform, secondFilterInputTextureUniform2;
10 |
11 | NSMutableDictionary *secondProgramUniformStateRestorationBlocks;
12 | }
13 |
14 | // Initialization and teardown
15 | - (id)initWithFirstStageVertexShaderFromString:(NSString *)firstStageVertexShaderString firstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageVertexShaderFromString:(NSString *)secondStageVertexShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString;
16 | - (id)initWithFirstStageFragmentShaderFromString:(NSString *)firstStageFragmentShaderString secondStageFragmentShaderFromString:(NSString *)secondStageFragmentShaderString;
17 | - (void)initializeSecondaryAttributes;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageTwoPassTextureSamplingFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoPassFilter.h"
2 |
3 | @interface GPUImageTwoPassTextureSamplingFilter : GPUImageTwoPassFilter
4 | {
5 | GLint verticalPassTexelWidthOffsetUniform, verticalPassTexelHeightOffsetUniform, horizontalPassTexelWidthOffsetUniform, horizontalPassTexelHeightOffsetUniform;
6 | GLfloat verticalPassTexelWidthOffset, verticalPassTexelHeightOffset, horizontalPassTexelWidthOffset, horizontalPassTexelHeightOffset;
7 | CGFloat _verticalTexelSpacing, _horizontalTexelSpacing;
8 | }
9 |
10 | // This sets the spacing between texels (in pixels) when sampling for the first and second passes. By default, this is 1.0
11 | @property(readwrite, nonatomic) CGFloat verticalTexelSpacing, horizontalTexelSpacing;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageUIElement.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageOutput.h"
2 |
3 | @interface GPUImageUIElement : GPUImageOutput
4 |
5 | // Initialization and teardown
6 | - (id)initWithView:(UIView *)inputView;
7 | - (id)initWithLayer:(CALayer *)inputLayer;
8 |
9 | // Layer management
10 | - (CGSize)layerSizeInPixels;
11 | - (void)update;
12 | - (void)updateUsingCurrentTime;
13 | - (void)updateWithTimestamp:(CMTime)frameTime;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageUnsharpMaskFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageGaussianBlurFilter;
4 |
5 | @interface GPUImageUnsharpMaskFilter : GPUImageFilterGroup
6 | {
7 | GPUImageGaussianBlurFilter *blurFilter;
8 | GPUImageFilter *unsharpMaskFilter;
9 | }
10 | // The blur radius of the underlying Gaussian blur. The default is 4.0.
11 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels;
12 |
13 | // The strength of the sharpening, from 0.0 on up, with a default of 1.0
14 | @property(readwrite, nonatomic) CGFloat intensity;
15 |
16 | @end
17 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageVibranceFilter.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImageVibranceFilter.h
3 | //
4 | //
5 | // Created by github.com/r3mus on 8/14/15.
6 | //
7 | //
8 |
9 | #import "GPUImageFilter.h"
10 |
11 | @interface GPUImageVibranceFilter : GPUImageFilter
12 | {
13 | GLint vibranceUniform;
14 | }
15 |
16 | // Modifies the saturation of desaturated colors, leaving saturated colors unmodified.
17 | // Value -1 to 1 (-1 is minimum vibrance, 0 is no change, and 1 is maximum vibrance)
18 | @property (readwrite, nonatomic) GLfloat vibrance;
19 |
20 | @end
21 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageVignetteFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | /** Performs a vignetting effect, fading out the image at the edges
4 | */
5 | @interface GPUImageVignetteFilter : GPUImageFilter
6 | {
7 | GLint vignetteCenterUniform, vignetteColorUniform, vignetteStartUniform, vignetteEndUniform;
8 | }
9 |
10 | // the center for the vignette in tex coords (defaults to 0.5, 0.5)
11 | @property (nonatomic, readwrite) CGPoint vignetteCenter;
12 |
13 | // The color to use for the Vignette (defaults to black)
14 | @property (nonatomic, readwrite) GPUVector3 vignetteColor;
15 |
16 | // The normalized distance from the center where the vignette effect starts. Default of 0.5.
17 | @property (nonatomic, readwrite) CGFloat vignetteStart;
18 |
19 | // The normalized distance from the center where the vignette effect ends. Default of 0.75.
20 | @property (nonatomic, readwrite) CGFloat vignetteEnd;
21 |
22 | @end
23 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageVoronoiConsumerFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageTwoInputFilter.h"
2 |
3 | @interface GPUImageVoronoiConsumerFilter : GPUImageTwoInputFilter
4 | {
5 | GLint sizeUniform;
6 | }
7 |
8 | @property (nonatomic, readwrite) CGSize sizeInPixels;
9 |
10 | @end
11 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageWeakPixelInclusionFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImage3x3TextureSamplingFilter.h"
2 |
3 | @interface GPUImageWeakPixelInclusionFilter : GPUImage3x3TextureSamplingFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageWhiteBalanceFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 | /**
3 | * Created by Alaric Cole
4 | * Allows adjustment of color temperature in terms of what an image was effectively shot in. This means higher Kelvin values will warm the image, while lower values will cool it.
5 |
6 | */
7 | @interface GPUImageWhiteBalanceFilter : GPUImageFilter
8 | {
9 | GLint temperatureUniform, tintUniform;
10 | }
11 | //choose color temperature, in degrees Kelvin
12 | @property(readwrite, nonatomic) CGFloat temperature;
13 |
14 | //adjust tint to compensate
15 | @property(readwrite, nonatomic) CGFloat tint;
16 |
17 | @end
18 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageXYDerivativeFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageSobelEdgeDetectionFilter.h"
2 |
3 | @interface GPUImageXYDerivativeFilter : GPUImageSobelEdgeDetectionFilter
4 |
5 | @end
6 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageZoomBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilter.h"
2 |
3 | @interface GPUImageZoomBlurFilter : GPUImageFilter
4 |
5 | /** A multiplier for the blur size, ranging from 0.0 on up, with a default of 1.0
6 | */
7 | @property (readwrite, nonatomic) CGFloat blurSize;
8 |
9 | /** The normalized center of the blur. (0.5, 0.5) by default
10 | */
11 | @property (readwrite, nonatomic) CGPoint blurCenter;
12 |
13 | @end
14 |
--------------------------------------------------------------------------------
/GPUImage/Source/GPUImageiOSBlurFilter.h:
--------------------------------------------------------------------------------
1 | #import "GPUImageFilterGroup.h"
2 |
3 | @class GPUImageSaturationFilter;
4 | @class GPUImageGaussianBlurFilter;
5 | @class GPUImageLuminanceRangeFilter;
6 |
7 | @interface GPUImageiOSBlurFilter : GPUImageFilterGroup
8 | {
9 | GPUImageSaturationFilter *saturationFilter;
10 | GPUImageGaussianBlurFilter *blurFilter;
11 | GPUImageLuminanceRangeFilter *luminanceRangeFilter;
12 | }
13 |
14 | /** A radius in pixels to use for the blur, with a default of 12.0. This adjusts the sigma variable in the Gaussian distribution function.
15 | */
16 | @property (readwrite, nonatomic) CGFloat blurRadiusInPixels;
17 |
18 | /** Saturation ranges from 0.0 (fully desaturated) to 2.0 (max saturation), with 0.8 as the normal level
19 | */
20 | @property (readwrite, nonatomic) CGFloat saturation;
21 |
22 | /** The degree to which to downsample, then upsample the incoming image to minimize computations within the Gaussian blur, default of 4.0
23 | */
24 | @property (readwrite, nonatomic) CGFloat downsampling;
25 |
26 |
27 | /** The degree to reduce the luminance range, from 0.0 to 1.0. Default is 0.6.
28 | */
29 | @property (readwrite, nonatomic) CGFloat rangeReductionFactor;
30 |
31 | @end
32 |
--------------------------------------------------------------------------------
/GPUImage/Source/Mac/GPUImageMac-Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>CFBundleDevelopmentRegion</key>
6 | 	<string>English</string>
7 | 	<key>CFBundleExecutable</key>
8 | 	<string>${EXECUTABLE_NAME}</string>
9 | 	<key>CFBundleIconFile</key>
10 | 	<string></string>
11 | 	<key>CFBundleIdentifier</key>
12 | 	<string>com.sunsetlakesoftware.${PRODUCT_NAME:rfc1034identifier}</string>
13 | 	<key>CFBundleInfoDictionaryVersion</key>
14 | 	<string>6.0</string>
15 | 	<key>CFBundleName</key>
16 | 	<string>${PRODUCT_NAME}</string>
17 | 	<key>CFBundlePackageType</key>
18 | 	<string>FMWK</string>
19 | 	<key>CFBundleShortVersionString</key>
20 | 	<string>1.0</string>
21 | 	<key>CFBundleSignature</key>
22 | 	<string>????</string>
23 | 	<key>CFBundleVersion</key>
24 | 	<string>1</string>
25 | 	<key>NSHumanReadableCopyright</key>
26 | 	<string>Copyright © 2013 Sunset Lake Software LLC. All rights reserved.</string>
27 | 	<key>NSPrincipalClass</key>
28 | 	<string></string>
29 | </dict>
30 | </plist>
31 |
--------------------------------------------------------------------------------
/GPUImage/Source/Mac/GPUImageMac-Prefix.pch:
--------------------------------------------------------------------------------
1 | //
2 | // Prefix header for all source files of the 'GPUImageMac' target in the 'GPUImageMac' project
3 | //
4 |
5 | #ifdef __OBJC__
6 | #import <Cocoa/Cocoa.h>
7 | #endif
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/Mac/GPUImagePicture.h:
--------------------------------------------------------------------------------
1 | #import <Cocoa/Cocoa.h>
2 | #import "GPUImageOutput.h"
3 |
4 | @interface GPUImagePicture : GPUImageOutput
5 | {
6 | CGSize pixelSizeOfImage;
7 | BOOL hasProcessedImage;
8 |
9 | dispatch_semaphore_t imageUpdateSemaphore;
10 | }
11 |
12 | // Initialization and teardown
13 | - (id)initWithURL:(NSURL *)url;
14 | - (id)initWithImage:(NSImage *)newImageSource;
15 | - (id)initWithCGImage:(CGImageRef)newImageSource;
16 | - (id)initWithImage:(NSImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput;
17 | - (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput;
18 |
19 | // Image rendering
20 | - (void)processImage;
21 | - (BOOL)processImageWithCompletionHandler:(void (^)(void))completion;
22 | - (void)processImageUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(NSImage *processedImage))block;
23 | - (CGSize)outputImageSize;
24 |
25 | @end
26 |
--------------------------------------------------------------------------------
/GPUImage/Source/Mac/GPUImageView.h:
--------------------------------------------------------------------------------
1 | #import <Cocoa/Cocoa.h>
2 | #import "GPUImageContext.h"
3 |
4 | typedef enum {
5 | kGPUImageFillModeStretch, // Stretch to fill the full view, which may distort the image outside of its normal aspect ratio
6 | kGPUImageFillModePreserveAspectRatio, // Maintains the aspect ratio of the source image, adding bars of the specified background color
7 | kGPUImageFillModePreserveAspectRatioAndFill // Maintains the aspect ratio of the source image, zooming in on its center to fill the view
8 | } GPUImageFillModeType;
9 |
10 | /**
11 | NSOpenGLView subclass to use as an endpoint for displaying GPUImage outputs
12 | */
13 | @interface GPUImageView : NSOpenGLView <GPUImageInput>
14 | {
15 | GPUImageRotationMode inputRotation;
16 | }
17 |
18 | /** The fill mode dictates how images are fit in the view, with the default being kGPUImageFillModePreserveAspectRatio
19 | */
20 | @property(readwrite, nonatomic) GPUImageFillModeType fillMode;
21 |
22 | /** This calculates the current display size, in pixels, taking into account Retina scaling factors
23 | */
24 | @property(readonly, nonatomic) CGSize sizeInPixels;
25 |
26 | @property(nonatomic) BOOL enabled;
27 |
28 | /** Handling fill mode
29 |
30 | @param redComponent Red component for background color
31 | @param greenComponent Green component for background color
32 | @param blueComponent Blue component for background color
33 | @param alphaComponent Alpha component for background color
34 | */
35 | - (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent;
36 |
37 | - (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue;
38 |
39 | @end
40 |
--------------------------------------------------------------------------------
/GPUImage/Source/Mac/en.lproj/InfoPlist.strings:
--------------------------------------------------------------------------------
1 | /* Localized versions of Info.plist keys */
2 |
3 |
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/Framework/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>CFBundleDevelopmentRegion</key>
6 | 	<string>en</string>
7 | 	<key>CFBundleExecutable</key>
8 | 	<string>${EXECUTABLE_NAME}</string>
9 | 	<key>CFBundleIdentifier</key>
10 | 	<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
11 | 	<key>CFBundleInfoDictionaryVersion</key>
12 | 	<string>6.0</string>
13 | 	<key>CFBundleName</key>
14 | 	<string>${PRODUCT_NAME}</string>
15 | 	<key>CFBundlePackageType</key>
16 | 	<string>FMWK</string>
17 | 	<key>CFBundleShortVersionString</key>
18 | 	<string>0.1.4</string>
19 | 	<key>CFBundleSignature</key>
20 | 	<string>????</string>
21 | 	<key>CFBundleVersion</key>
22 | 	<string>${CURRENT_PROJECT_VERSION}</string>
23 | 	<key>NSPrincipalClass</key>
24 | 	<string></string>
25 | </dict>
26 | </plist>
27 |
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/Framework/module.modulemap:
--------------------------------------------------------------------------------
1 | framework module GPUImage {
2 | umbrella header "GPUImageFramework.h"
3 |
4 | export *
5 | module * { export * }
6 | }
7 |
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/GPUImage-Prefix.pch:
--------------------------------------------------------------------------------
1 | //
2 | // Prefix header for all source files of the 'GPUImage' target in the 'GPUImage' project
3 | //
4 |
5 | #ifdef __OBJC__
6 | #import <Foundation/Foundation.h>
7 | #endif
8 |
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/GPUImagePicture+TextureSubimage.h:
--------------------------------------------------------------------------------
1 | //
2 | // GPUImagePicture+TextureSubimage.h
3 | // GPUImage
4 | //
5 | // Created by Jack Wu on 2014-05-28.
6 | // Copyright (c) 2014 Brad Larson. All rights reserved.
7 | //
8 |
9 | #import "GPUImagePicture.h"
10 |
11 | @interface GPUImagePicture (TextureSubimage)
12 |
13 | - (void)replaceTextureWithSubimage:(UIImage*)subimage;
14 | - (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource;
15 |
16 | - (void)replaceTextureWithSubimage:(UIImage*)subimage inRect:(CGRect)subRect;
17 | - (void)replaceTextureWithSubCGImage:(CGImageRef)subimageSource inRect:(CGRect)subRect;
18 |
19 | @end
20 |
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/GPUImagePicture.h:
--------------------------------------------------------------------------------
1 | #import <UIKit/UIKit.h>
2 | #import "GPUImageOutput.h"
3 |
4 |
5 | @interface GPUImagePicture : GPUImageOutput
6 | {
7 | CGSize pixelSizeOfImage;
8 | BOOL hasProcessedImage;
9 |
10 | dispatch_semaphore_t imageUpdateSemaphore;
11 | }
12 |
13 | // Initialization and teardown
14 | - (id)initWithURL:(NSURL *)url;
15 | - (id)initWithImage:(UIImage *)newImageSource;
16 | - (id)initWithCGImage:(CGImageRef)newImageSource;
17 | - (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput;
18 | - (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput;
19 | - (id)initWithImage:(UIImage *)newImageSource removePremultiplication:(BOOL)removePremultiplication;
20 | - (id)initWithCGImage:(CGImageRef)newImageSource removePremultiplication:(BOOL)removePremultiplication;
21 | - (id)initWithImage:(UIImage *)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication;
22 | - (id)initWithCGImage:(CGImageRef)newImageSource smoothlyScaleOutput:(BOOL)smoothlyScaleOutput removePremultiplication:(BOOL)removePremultiplication;
23 |
24 | // Image rendering
25 | - (void)processImage;
26 | - (CGSize)outputImageSize;
27 |
28 | /**
29 | * Process image with all targets and filters asynchronously
30 | * The completion handler is called after processing finished in the
31 | * GPU's dispatch queue - and only if this method did not return NO.
32 | *
33 | * @returns NO if resource is blocked and processing is discarded, YES otherwise
34 | */
35 | - (BOOL)processImageWithCompletionHandler:(void (^)(void))completion;
36 | - (void)processImageUpToFilter:(GPUImageOutput *)finalFilterInChain withCompletionHandler:(void (^)(UIImage *processedImage))block;
37 |
38 | @end
39 |
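The asynchronous variant documented above returns NO when a previous processImage pass is still holding the update semaphore. A hedged usage sketch (not from the repository); addTarget:, useNextFrameForImageCapture and imageFromCurrentFramebuffer are assumed from GPUImageOutput, and inputImage/someFilter are hypothetical.

    // Illustrative: process a still image asynchronously and read the result once the GPU is done.
    GPUImagePicture *picture = [[GPUImagePicture alloc] initWithImage:inputImage]; // assumed UIImage
    [picture addTarget:someFilter];                                                // assumed filter
    [someFilter useNextFrameForImageCapture];

    BOOL accepted = [picture processImageWithCompletionHandler:^{
        // Runs on GPUImage's processing queue once all targets have been rendered.
        UIImage *result = [someFilter imageFromCurrentFramebuffer];
        dispatch_async(dispatch_get_main_queue(), ^{
            // hand `result` to the UI here
        });
    }];
    if (!accepted) {
        NSLog(@"Previous processImage still in flight; this frame was discarded.");
    }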
--------------------------------------------------------------------------------
/GPUImage/Source/iOS/GPUImageView.h:
--------------------------------------------------------------------------------
1 | #import <UIKit/UIKit.h>
2 | #import "GPUImageContext.h"
3 |
4 | typedef NS_ENUM(NSUInteger, GPUImageFillModeType) {
5 | kGPUImageFillModeStretch, // Stretch to fill the full view, which may distort the image outside of its normal aspect ratio
6 | kGPUImageFillModePreserveAspectRatio, // Maintains the aspect ratio of the source image, adding bars of the specified background color
7 | kGPUImageFillModePreserveAspectRatioAndFill // Maintains the aspect ratio of the source image, zooming in on its center to fill the view
8 | };
9 |
10 |
11 |
12 | /**
13 | UIView subclass to use as an endpoint for displaying GPUImage outputs
14 | */
15 | @interface GPUImageView : UIView <GPUImageInput>
16 | {
17 | GPUImageRotationMode inputRotation;
18 | }
19 |
20 | /** The fill mode dictates how images are fit in the view, with the default being kGPUImageFillModePreserveAspectRatio
21 | */
22 | @property(readwrite, nonatomic) GPUImageFillModeType fillMode;
23 |
24 | /** This calculates the current display size, in pixels, taking into account Retina scaling factors
25 | */
26 | @property(readonly, nonatomic) CGSize sizeInPixels;
27 |
28 | @property(nonatomic) BOOL enabled;
29 |
30 | /** Handling fill mode
31 |
32 | @param redComponent Red component for background color
33 | @param greenComponent Green component for background color
34 | @param blueComponent Blue component for background color
35 | @param alphaComponent Alpha component for background color
36 | */
37 | - (void)setBackgroundColorRed:(GLfloat)redComponent green:(GLfloat)greenComponent blue:(GLfloat)blueComponent alpha:(GLfloat)alphaComponent;
38 |
39 | - (void)setCurrentlyReceivingMonochromeInput:(BOOL)newValue;
40 |
41 | @end
42 |
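A typical live-preview hookup, shown as an illustrative sketch rather than code from this repository: GPUImageVideoCamera and its initWithSessionPreset:cameraPosition:, addTarget: and startCameraCapture calls are assumed from the wider framework, while fillMode and the background color setter are declared above.

    // Illustrative: show the back camera in a view, letterboxed on a dark background.
    GPUImageView *previewView = [[GPUImageView alloc] initWithFrame:self.view.bounds];
    previewView.fillMode = kGPUImageFillModePreserveAspectRatio;
    [previewView setBackgroundColorRed:0.1 green:0.1 blue:0.1 alpha:1.0];
    [self.view addSubview:previewView];

    GPUImageVideoCamera *camera =                                    // assumed class from the framework
        [[GPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset1280x720
                                            cameraPosition:AVCaptureDevicePositionBack];
    camera.outputImageOrientation = UIInterfaceOrientationPortrait;
    [camera addTarget:previewView];
    [camera startCameraCapture];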
--------------------------------------------------------------------------------
/clibs/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/.DS_Store
--------------------------------------------------------------------------------
/clibs/3th-party/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/.DS_Store
--------------------------------------------------------------------------------
/clibs/3th-party/libfaac/lib/libfaac.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/libfaac/lib/libfaac.a
--------------------------------------------------------------------------------
/clibs/3th-party/librtmp/include/http.h:
--------------------------------------------------------------------------------
1 | #ifndef __RTMP_HTTP_H__
2 | #define __RTMP_HTTP_H__
3 | /*
4 | * Copyright (C) 2010 Howard Chu
5 | * Copyright (C) 2010 Antti Ajanki
6 | *
7 | * This file is part of librtmp.
8 | *
9 | * librtmp is free software; you can redistribute it and/or modify
10 | * it under the terms of the GNU Lesser General Public License as
11 | * published by the Free Software Foundation; either version 2.1,
12 | * or (at your option) any later version.
13 | *
14 | * librtmp is distributed in the hope that it will be useful,
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 | * GNU General Public License for more details.
18 | *
19 | * You should have received a copy of the GNU Lesser General Public License
20 | * along with librtmp see the file COPYING. If not, write to
21 | * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 | * Boston, MA 02110-1301, USA.
23 | * http://www.gnu.org/copyleft/lgpl.html
24 | */
25 |
26 | typedef enum {
27 | HTTPRES_OK, /* result OK */
28 | HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */
29 | HTTPRES_NOT_FOUND, /* not found */
30 | HTTPRES_BAD_REQUEST, /* client error */
31 | HTTPRES_SERVER_ERROR, /* server reported an error */
32 | HTTPRES_REDIRECTED, /* resource has been moved */
33 | HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */
34 | } HTTPResult;
35 |
36 | struct HTTP_ctx {
37 | char *date;
38 | int size;
39 | int status;
40 | void *data;
41 | };
42 |
43 | typedef size_t (HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, void *stream);
44 |
45 | HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, HTTP_read_callback *cb);
46 |
47 | #endif
48 |
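The HTTP_get entry point above is plain C and can be called directly from this project's Objective-C sources. An illustrative callback (not from the repository) that accumulates the response body into an NSMutableData, assuming, as in librtmp's own usage, that the ctx.data pointer is what the callback receives as its stream argument:

    // Illustrative: fetch a URL through librtmp's small HTTP client.
    static size_t AppendToData(void *ptr, size_t size, size_t nmemb, void *stream)
    {
        NSMutableData *body = (__bridge NSMutableData *)stream;
        [body appendBytes:ptr length:size * nmemb];
        return size * nmemb;                      // report the number of bytes consumed
    }

    static void FetchExample(void)
    {
        NSMutableData *body = [NSMutableData data];
        struct HTTP_ctx ctx = {0};
        ctx.data = (__bridge void *)body;         // handed back to the callback as `stream` (assumed)
        HTTPResult result = HTTP_get(&ctx, "http://example.com/live.sdp", AppendToData);
        if (result == HTTPRES_OK) {
            NSLog(@"HTTP %d, %d bytes", ctx.status, ctx.size);
        }
    }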
--------------------------------------------------------------------------------
/clibs/3th-party/librtmp/lib/librtmp.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/librtmp/lib/librtmp.a
--------------------------------------------------------------------------------
/clibs/3th-party/libx264/include/x264_config.h:
--------------------------------------------------------------------------------
1 | #define X264_BIT_DEPTH 8
2 | #define X264_GPL 1
3 | #define X264_INTERLACED 1
4 | #define X264_CHROMA_FORMAT 0
5 | #define X264_REV 2705
6 | #define X264_REV_DIFF 0
7 | #define X264_VERSION " r2705 3f5ed56"
8 | #define X264_POINTVER "0.148.2705 3f5ed56"
9 |
--------------------------------------------------------------------------------
/clibs/3th-party/libx264/lib/libx264.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/libx264/lib/libx264.a
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/libyuv/.DS_Store
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2011 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_H_
13 |
14 | #include "libyuv/basic_types.h"
15 | #include "libyuv/compare.h"
16 | #include "libyuv/convert.h"
17 | #include "libyuv/convert_argb.h"
18 | #include "libyuv/convert_from.h"
19 | #include "libyuv/convert_from_argb.h"
20 | #include "libyuv/cpu_id.h"
21 | #include "libyuv/format_conversion.h"
22 | #include "libyuv/mjpeg_decoder.h"
23 | #include "libyuv/planar_functions.h"
24 | #include "libyuv/rotate.h"
25 | #include "libyuv/rotate_argb.h"
26 | #include "libyuv/row.h"
27 | #include "libyuv/scale.h"
28 | #include "libyuv/scale_argb.h"
29 | #include "libyuv/scale_row.h"
30 | #include "libyuv/version.h"
31 | #include "libyuv/video_common.h"
32 |
33 | #endif // INCLUDE_LIBYUV_H_ NOLINT
34 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/basic_types.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/compare.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/convert.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/convert_from.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/cpu_id.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/format_conversion.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/mjpeg_decoder.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/planar_functions.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/scale.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/prop-base/version.h.svn-base:
--------------------------------------------------------------------------------
1 | K 13
2 | svn:eol-style
3 | V 2
4 | LF
5 | END
6 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/text-base/rotate_argb.h.svn-base:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_ROTATE_ARGB_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_ROTATE_ARGB_H_
13 |
14 | #include "libyuv/basic_types.h"
15 | #include "libyuv/rotate.h" // For RotationMode.
16 |
17 | #ifdef __cplusplus
18 | namespace libyuv {
19 | extern "C" {
20 | #endif
21 |
22 | // Rotate ARGB frame
23 | LIBYUV_API
24 | int ARGBRotate(const uint8* src_argb, int src_stride_argb,
25 | uint8* dst_argb, int dst_stride_argb,
26 | int src_width, int src_height, enum RotationMode mode);
27 |
28 | #ifdef __cplusplus
29 | } // extern "C"
30 | } // namespace libyuv
31 | #endif
32 |
33 | #endif // INCLUDE_LIBYUV_ROTATE_ARGB_H_ NOLINT
34 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/text-base/scale_argb.h.svn-base:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_SCALE_ARGB_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_SCALE_ARGB_H_
13 |
14 | #include "libyuv/basic_types.h"
15 | #include "libyuv/scale.h" // For FilterMode
16 |
17 | #ifdef __cplusplus
18 | namespace libyuv {
19 | extern "C" {
20 | #endif
21 |
22 | LIBYUV_API
23 | int ARGBScale(const uint8* src_argb, int src_stride_argb,
24 | int src_width, int src_height,
25 | uint8* dst_argb, int dst_stride_argb,
26 | int dst_width, int dst_height,
27 | enum FilterMode filtering);
28 |
29 | // Clipped scale takes destination rectangle coordinates for clip values.
30 | LIBYUV_API
31 | int ARGBScaleClip(const uint8* src_argb, int src_stride_argb,
32 | int src_width, int src_height,
33 | uint8* dst_argb, int dst_stride_argb,
34 | int dst_width, int dst_height,
35 | int clip_x, int clip_y, int clip_width, int clip_height,
36 | enum FilterMode filtering);
37 |
38 | #ifdef __cplusplus
39 | } // extern "C"
40 | } // namespace libyuv
41 | #endif
42 |
43 | #endif // INCLUDE_LIBYUV_SCALE_ARGB_H_ NOLINT
44 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/.svn/text-base/version.h.svn-base:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_VERSION_H_
13 |
14 | #define LIBYUV_VERSION 703
15 |
16 | #endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
17 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/rotate_argb.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_ROTATE_ARGB_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_ROTATE_ARGB_H_
13 |
14 | #include "libyuv/basic_types.h"
15 | #include "libyuv/rotate.h" // For RotationMode.
16 |
17 | #ifdef __cplusplus
18 | namespace libyuv {
19 | extern "C" {
20 | #endif
21 |
22 | // Rotate ARGB frame
23 | LIBYUV_API
24 | int ARGBRotate(const uint8* src_argb, int src_stride_argb,
25 | uint8* dst_argb, int dst_stride_argb,
26 | int src_width, int src_height, enum RotationMode mode);
27 |
28 | #ifdef __cplusplus
29 | } // extern "C"
30 | } // namespace libyuv
31 | #endif
32 |
33 | #endif // INCLUDE_LIBYUV_ROTATE_ARGB_H_ NOLINT
34 |
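A minimal sketch of how the ARGBRotate declaration above might be called, assuming the kRotate90 value of libyuv's RotationMode enum (declared in rotate.h) and small hand-allocated buffers; all sizes are illustrative:

    #include <stdlib.h>
    #include "libyuv/rotate_argb.h"

    /* Rotate a 64x32 ARGB image by 90 degrees; the rotated image is 32x64,
       so the destination stride is the new width (src_h) times 4 bytes. */
    static int rotate_example(void) {
        int src_w = 64, src_h = 32;
        uint8* src = (uint8*)calloc(src_w * src_h * 4, 1);  /* 4 bytes per ARGB pixel */
        uint8* dst = (uint8*)calloc(src_w * src_h * 4, 1);
        int ret = ARGBRotate(src, src_w * 4,   /* source and its stride in bytes */
                             dst, src_h * 4,   /* destination stride after rotation */
                             src_w, src_h, kRotate90);
        free(src);
        free(dst);
        return ret;  /* 0 on success */
    }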
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/include/libyuv/version.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
12 | #define INCLUDE_LIBYUV_VERSION_H_
13 |
14 | #define LIBYUV_VERSION 1170
15 |
16 | #endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
17 |
--------------------------------------------------------------------------------
/clibs/3th-party/libyuv/lib/libyuv.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/3th-party/libyuv/lib/libyuv.a
--------------------------------------------------------------------------------
/clibs/libaw/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/libaw/.DS_Store
--------------------------------------------------------------------------------
/clibs/libaw/aw_all.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | libaw is a small C utility library I put together while developing this project. It provides:
5 | arrays (store/retrieve arbitrary data),
6 | dictionaries (dict, map),
7 | data blocks (reading, writing and storing binary data),
8 | memory allocation (trackable),
9 | file operations,
10 | a thread pool,
11 | a librtmp wrapper.
12 | This header includes every libaw interface; external code only needs to include this one file.
13 | */
14 |
15 | #ifndef aw_all_h
16 | #define aw_all_h
17 |
18 | #include
19 | #include "aw_common.h"
20 | #include "aw_x264.h"
21 | #include "aw_faac.h"
22 | #include "aw_encode_flv.h"
23 | #include "aw_streamer.h"
24 | #include "aw_sw_faac_encoder.h"
25 | #include "aw_sw_x264_encoder.h"
26 |
27 | #endif /* aw_all_h */
28 |
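Since aw_all.h is an umbrella header, client code only ever needs the single include below; everything else in libaw is reached transitively:

    #include "aw_all.h"  /* pulls in aw_common.h, aw_x264.h, aw_faac.h, aw_encode_flv.h, aw_streamer.h and the software encoders */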
--------------------------------------------------------------------------------
/clibs/libaw/common/aw_alloc.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Wraps malloc and free so that memory allocations can be tracked, which makes debugging easier.
5 | */
6 |
7 | #ifndef aw_alloc_h
8 | #define aw_alloc_h
9 |
10 | #include
11 |
12 | #if defined(__FILE__) && defined(__LINE__)
13 | #define aw_alloc(size) aw_alloc_detail((size), __FILE__, __LINE__)
14 | #else
15 | #define aw_alloc(size) aw_alloc_detail((size), "", 0)
16 | #endif
17 |
18 | //Monitors allocations and frees, useful for debugging memory leaks
19 |
20 | //Custom alloc
21 | extern void * aw_alloc_detail(size_t size, const char *file_name, uint32_t line);
22 |
23 | //Custom free
24 | extern void aw_free(void *);
25 |
26 | //Start debug alloc; allocation/free counts are only recorded after this is called
27 | extern void aw_init_debug_alloc();
28 |
29 | //Stop debug alloc; after this call allocations/frees are no longer recorded and all counters are reset to 0
30 | extern void aw_uninit_debug_alloc();
31 |
32 | //Total number of bytes allocated
33 | extern size_t aw_total_alloc_size();
34 |
35 | //Total number of bytes freed
36 | extern size_t aw_total_free_size();
37 |
38 | //Print alloc/free/leak statistics
39 | extern void aw_print_alloc_description();
40 |
41 | #endif /* aw_alloc_h */
42 |
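A minimal sketch of the tracking-allocator workflow declared above: counters are only recorded between the init/uninit calls. The 1024-byte allocation is illustrative:

    #include "aw_alloc.h"

    void alloc_demo(void) {
        aw_init_debug_alloc();             /* start recording allocations/frees */

        void *buf = aw_alloc(1024);        /* expands to aw_alloc_detail(1024, __FILE__, __LINE__) */
        /* ... use buf ... */
        aw_free(buf);

        /* aw_total_alloc_size() and aw_total_free_size() should match here if nothing leaked */
        aw_print_alloc_description();      /* prints alloc/free/leak statistics */
        aw_uninit_debug_alloc();           /* stop recording and reset counters */
    }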
--------------------------------------------------------------------------------
/clibs/libaw/common/aw_common.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #ifndef aw_common_h
4 | #define aw_common_h
5 |
6 | #include
7 | #include "aw_alloc.h"
8 | #include "aw_array.h"
9 | #include "aw_data.h"
10 | #include "aw_dict.h"
11 | #include "aw_file.h"
12 | #include "aw_rtmp.h"
13 | #include "aw_utils.h"
14 |
15 | #endif /* aw_common_h */
16 |
--------------------------------------------------------------------------------
/clibs/libaw/common/aw_file.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Wraps the Unix file-operation interfaces.
5 | Operations are not guaranteed to succeed; callers must make sure they have the required access permissions for the file in question.
6 | */
7 |
8 | #ifndef aw_file_h
9 | #define aw_file_h
10 |
11 | #include
12 | #include "aw_data.h"
13 |
14 | //On iOS, files inside the main bundle cannot be read
15 |
16 | //Whether the file exists
17 | extern int8_t aw_is_file_exist(const char *file_path);
18 |
19 | //File size
20 | extern size_t aw_file_size(const char *file_path);
21 |
22 | //Remove a file
23 | extern int8_t aw_remove_file(const char *file_path);
24 |
25 | //Remove a directory
26 | extern int8_t aw_remove_dir(const char *file_dir);
27 |
28 | //Read data
29 | extern aw_data *aw_read_data_from_file(const char *file_path);
30 |
31 | //Write data
32 | extern int8_t aw_write_data_to_file(const char *file_path, aw_data *data);
33 |
34 | //Self-test for this module
35 | extern void aw_test_file(const char *dir);
36 |
37 | #endif /* aw_file_h */
38 |
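A minimal usage sketch for the file helpers above. The path is a hypothetical example, and since the layout of aw_data is defined in aw_data.h (not shown here), the sketch only checks the returned pointer:

    #include "aw_file.h"

    void file_demo(const char *path) {        /* e.g. "/tmp/sample.bin" (hypothetical) */
        if (aw_is_file_exist(path)) {
            size_t size = aw_file_size(path);
            aw_data *data = aw_read_data_from_file(path);
            if (data) {
                /* ... inspect and release the data via the aw_data.h API ... */
            }
            (void)size;
            aw_remove_file(path);             /* may fail without the right permissions */
        }
    }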
--------------------------------------------------------------------------------
/clibs/libaw/common/aw_thread_poll.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | A convenient pthread-based thread pool: just hand tasks to the pool and it runs them.
5 | */
6 |
7 | #ifndef aw_thread_h
8 | #define aw_thread_h
9 |
10 | #include
11 | #include
12 |
13 | typedef struct aw_thread_poll aw_thread_poll;
14 | typedef void (*aw_thread_func)(void *);
15 | typedef void (*aw_thread_finish_cb)();
16 |
17 | //Create a thread pool
18 | extern aw_thread_poll *alloc_aw_thread_poll(int limit, int stored_task_list_limit);
19 | //Free the thread pool
20 | extern void free_aw_thread_poll(aw_thread_poll **poll_p, aw_thread_finish_cb finish_cb);
21 | //Add a task
22 | extern void aw_add_task_to_thread_poll(aw_thread_poll *poll, aw_thread_func func, void *param);
23 |
24 | //Number of tasks accumulated in the thread pool that have not yet been dispatched
25 | extern int aw_stored_task_count_in_thread_poll(aw_thread_poll *poll);
26 |
27 | extern void test_thread();
28 |
29 | #endif /* aw_thread_h */
30 |
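A minimal sketch of the thread-pool API above (note that the header spells "pool" as "poll" in its identifiers); the worker function and the limits are illustrative:

    #include <stdio.h>
    #include "aw_thread_poll.h"

    static void do_work(void *param) {
        printf("processing task %ld\n", (long)param);
    }

    static void on_pool_finished(void) {
        printf("thread pool drained and released\n");
    }

    void pool_demo(void) {
        /* 2 worker threads, at most 100 queued tasks (illustrative limits) */
        aw_thread_poll *pool = alloc_aw_thread_poll(2, 100);

        aw_add_task_to_thread_poll(pool, do_work, (void *)1L);
        aw_add_task_to_thread_poll(pool, do_work, (void *)2L);

        printf("queued: %d\n", aw_stored_task_count_in_thread_poll(pool));
        free_aw_thread_poll(&pool, on_pool_finished);  /* takes &pool so the implementation can clear it */
    }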
--------------------------------------------------------------------------------
/clibs/libaw/common/aw_utils.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | Utility helpers, e.g. logging.
5 | */
6 |
7 | #ifndef aw_utils_h
8 | #define aw_utils_h
9 |
10 | #include
11 | #include
12 | #include "aw_alloc.h"
13 |
14 | #define AWLog(...) \
15 | do{ \
16 | printf(__VA_ARGS__); \
17 | printf("\n");\
18 | }while(0)
19 |
20 | #define aw_log(...) AWLog(__VA_ARGS__)
21 |
22 | //To speed up video encoding, the stride must be a multiple of 16
23 | #define aw_stride(wid) ((wid % 16 != 0) ? ((wid) + 16 - (wid) % 16): (wid))
24 |
25 | #endif /* aw_utils_h */
26 |
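The aw_stride macro rounds a width up to the next multiple of 16, the alignment the encoder expects; a couple of worked values:

    #include "aw_utils.h"

    /* aw_stride(540) -> 540 + 16 - 540 % 16 = 540 + 16 - 12 = 544 */
    /* aw_stride(640) -> 640 (already a multiple of 16, returned unchanged) */
    /* aw_stride(721) -> 721 + 16 - 1 = 736 */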
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hunter858/PushStreamSDK/c13ee32496008f579c68dca4562d97dc546eaffc/clibs/libaw/pushStream/.DS_Store
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/encoder/aw_faac.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | AAC software encoder: a wrapper around faac.
5 | */
6 |
7 | #ifndef aw_faac_h
8 | #define aw_faac_h
9 |
10 | #include
11 | #include "faac.h"
12 | #include "faaccfg.h"
13 | #include "aw_data.h"
14 |
15 | typedef struct aw_faac_config {
16 | //Sample rate
17 | int sample_rate;
18 |
19 | //Size of a single sample
20 | int sample_size;
21 |
22 | //Bitrate
23 | int bitrate;
24 |
25 | //Channel count
26 | int channel_count;
27 | } aw_faac_config;
28 |
29 | extern aw_faac_config *alloc_aw_faac_config();
30 | extern void free_aw_faac_config(aw_faac_config **);
31 |
32 | typedef struct aw_faac_context {
33 | aw_faac_config config;
34 |
35 | //Encoder handle
36 | faacEncHandle *faac_handler;
37 |
38 | //Maximum number of input samples
39 | unsigned long max_input_sample_count;
40 | unsigned long max_input_byte_count;
41 |
42 | //Maximum number of output bytes
43 | unsigned long max_output_byte_count;
44 |
45 | //Buffer
46 | int8_t *aac_buffer;
47 |
48 | aw_data *audio_specific_config_data;
49 |
50 | //Holds the data of each encoded frame
51 | aw_data *encoded_aac_data;
52 | } aw_faac_context;
53 |
54 | extern aw_faac_context * alloc_aw_faac_context(aw_faac_config);
55 | extern void free_aw_faac_context(aw_faac_context **);
56 |
57 | extern void aw_encode_pcm_frame_2_aac(aw_faac_context *ctx, int8_t *pcm_data, long len);
58 |
59 | #endif /* aw_pcm2aac_h */
60 |
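A minimal sketch of driving the faac wrapper directly (the higher-level aw_sw_faac_encoder.h below is the usual entry point). The 44.1 kHz stereo parameters are illustrative, and the meaning of sample_size as bits per sample is an assumption:

    #include "aw_faac.h"

    void faac_demo(int8_t *pcm, long pcm_len) {
        aw_faac_config config;
        config.sample_rate   = 44100;   /* Hz */
        config.sample_size   = 16;      /* bits per sample (assumed) */
        config.bitrate       = 100000;  /* bits per second */
        config.channel_count = 2;

        aw_faac_context *ctx = alloc_aw_faac_context(config);

        /* Feed one PCM buffer; the AAC output accumulates in ctx->encoded_aac_data */
        aw_encode_pcm_frame_2_aac(ctx, pcm, pcm_len);

        free_aw_faac_context(&ctx);
    }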
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/encoder/aw_sw_faac_encoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #ifndef aw_sw_faac_encoder_h
4 | #define aw_sw_faac_encoder_h
5 |
6 | /*
7 | Software encoding with faac: the AAC software encoder.
8 | */
9 |
10 | #include "aw_faac.h"
11 | #include "aw_encode_flv.h"
12 |
13 | //Open/close the encoder
14 | extern void aw_sw_encoder_open_faac_encoder(aw_faac_config *faac_config);
15 | extern void aw_sw_encoder_close_faac_encoder();
16 |
17 | //Software-encode PCM data with faac and convert it into an flv_audio_tag
18 | extern aw_flv_audio_tag *aw_sw_encoder_encode_faac_data(int8_t *pcm_data, long len, uint32_t timestamp);
19 | //Create the flv tag containing the audio specific config, based on faac_config
20 | extern aw_flv_audio_tag *aw_sw_encoder_create_faac_specific_config_tag();
21 |
22 | //Number of input samples per frame, used to compute timestamps; dividing it by the sample rate gives the duration of one frame.
23 | extern uint32_t aw_sw_faac_encoder_max_input_sample_count();
24 |
25 | //Whether the encoder is valid
26 | extern int8_t aw_sw_faac_encoder_is_valid();
27 |
28 | //The following two functions can be used by any encoder
29 | //Convert AAC data into an flv_audio_tag
30 | extern aw_flv_audio_tag *aw_encoder_create_audio_tag(int8_t *aac_data, long len, uint32_t timeStamp, aw_faac_config *faac_cfg);
31 | //Create the audio_specific_config tag
32 | extern aw_flv_audio_tag *aw_encoder_create_audio_specific_config_tag(aw_data *audio_specific_config_data, aw_faac_config *faac_config);
33 |
34 |
35 | #endif /* aw_sw_audio_encoder_h */
36 |
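A minimal sketch of the software AAC flow implied by the declarations above: open the encoder, send the audio-specific-config tag once, then turn each PCM buffer into an flv audio tag (the sending functions come from aw_streamer.h, listed further down). All parameters are illustrative, and the ownership of the config after closing the encoder is left to the implementation:

    #include "aw_sw_faac_encoder.h"
    #include "aw_streamer.h"

    void audio_encode_demo(int8_t *pcm, long len, uint32_t timestamp_ms) {
        aw_faac_config *cfg = alloc_aw_faac_config();
        cfg->sample_rate   = 44100;
        cfg->sample_size   = 16;
        cfg->bitrate       = 100000;
        cfg->channel_count = 2;

        aw_sw_encoder_open_faac_encoder(cfg);

        if (aw_sw_faac_encoder_is_valid()) {
            /* The first tag carries the audio specific config */
            aw_flv_audio_tag *asc = aw_sw_encoder_create_faac_specific_config_tag();
            aw_streamer_send_audio_specific_config_tag(asc);

            /* Every subsequent PCM buffer becomes a regular audio tag */
            aw_flv_audio_tag *tag = aw_sw_encoder_encode_faac_data(pcm, len, timestamp_ms);
            aw_streamer_send_audio_data(tag);
        }

        aw_sw_encoder_close_faac_encoder();
        /* cfg ownership is implementation-defined; not freed in this sketch */
    }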
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/encoder/aw_sw_x264_encoder.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #ifndef aw_sw_x264_encoder_h
4 | #define aw_sw_x264_encoder_h
5 |
6 | /*
7 | Software encoding with x264: the H.264 software encoder.
8 | */
9 |
10 | #include "aw_x264.h"
11 | #include "aw_encode_flv.h"
12 |
13 | //Encode captured YUV video data into an flv video tag
14 | extern aw_flv_video_tag * aw_sw_encoder_encode_x264_data(int8_t *yuv_data, long len, long y_stride, uint32_t timeStamp);
15 |
16 | //Create the first video/audio tag according to the flv/h264/aac specs
17 | extern aw_flv_video_tag *aw_sw_encoder_create_x264_sps_pps_tag();
18 |
19 | //Open/close the encoder
20 | extern void aw_sw_encoder_open_x264_encoder(aw_x264_config *x264_config);
21 | extern void aw_sw_encoder_close_x264_encoder();
22 |
23 | extern int8_t aw_sw_x264_encoder_is_valid();
24 |
25 | //Creating flv tags; unrelated to the encoding itself.
26 | //Convert H.264 data into an flv_video_tag
27 | extern aw_flv_video_tag *aw_encoder_create_video_tag(int8_t *h264_data, long len, uint32_t timeStamp, long composition_time, int8_t is_key_frame);
28 | //Create the sps_pps tag
29 | extern aw_flv_video_tag *aw_encoder_create_sps_pps_tag(aw_data *sps_pps_data);
30 |
31 | #endif /* aw_sw_video_encoder_h */
32 |
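The video path mirrors the audio one: open the x264 encoder, send the sps/pps tag once, then convert each captured YUV frame into an flv video tag. A minimal sketch under the same assumptions (540x960 I420 frames and strides from aw_stride are illustrative):

    #include "aw_sw_x264_encoder.h"
    #include "aw_streamer.h"
    #include "aw_utils.h"

    void video_encode_demo(int8_t *yuv, long len, uint32_t timestamp_ms) {
        aw_x264_config *cfg = alloc_aw_x264_config();
        cfg->width   = 540;
        cfg->height  = 960;
        cfg->fps     = 20;
        cfg->bitrate = 1000000;                  /* bits per second */
        cfg->input_data_format = X264_CSP_I420;  /* or X264_CSP_NV12 */
        cfg->y_stride  = aw_stride(cfg->width);      /* 540 -> 544 */
        cfg->uv_stride = aw_stride(cfg->width / 2);  /* 270 -> 272 */

        aw_sw_encoder_open_x264_encoder(cfg);

        if (aw_sw_x264_encoder_is_valid()) {
            /* The first tag carries sps/pps */
            aw_flv_video_tag *sps_pps = aw_sw_encoder_create_x264_sps_pps_tag();
            aw_streamer_send_video_sps_pps_tag(sps_pps);

            /* Each YUV frame becomes a regular video tag */
            aw_flv_video_tag *tag = aw_sw_encoder_encode_x264_data(yuv, len, cfg->y_stride, timestamp_ms);
            aw_streamer_send_video_data(tag);
        }

        aw_sw_encoder_close_x264_encoder();
    }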
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/encoder/aw_x264.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | H.264 software encoder: a wrapper around x264.
5 | */
6 |
7 | #ifndef aw_x264_h
8 | #define aw_x264_h
9 |
10 | #include
11 | #include "x264.h"
12 | #include "x264_config.h"
13 | #include "aw_data.h"
14 |
15 | typedef struct aw_x264_config {
16 | //Width and height
17 | int width;
18 | int height;
19 |
20 | //Frame rate, frames per second
21 | int fps;
22 |
23 | //Bitrate
24 | int bitrate;
25 |
26 | //Number of B-frames
27 | int b_frame_count;
28 |
29 | //X264_CSP_NV12 || X264_CSP_I420
30 | int input_data_format;
31 |
32 | //Maximum keyframe interval
33 | int videoMaxKeyFrameInterval;
34 |
35 | //y & uv aligned strides (multiples of 16)
36 | int y_stride;
37 | int uv_stride;
38 |
39 | }aw_x264_config;
40 |
41 | extern aw_x264_config *alloc_aw_x264_config();
42 | extern void free_aw_x264_config(aw_x264_config **);
43 |
44 | typedef struct aw_x264_context{
45 | //Configuration
46 | aw_x264_config config;
47 |
48 | //x264 handler
49 | x264_t *x264_handler;
50 |
51 | //Variables used during encoding
52 | //Input
53 | x264_picture_t *pic_in;
54 | //Output
55 | x264_picture_t *pic_out;
56 |
57 | //Holds the encoded data
58 | x264_nal_t *nal;
59 | int nal_count;
60 |
61 | aw_data *sps_pps_data;
62 |
63 | //Holds the data of each encoded frame
64 | aw_data *encoded_h264_data;
65 | }aw_x264_context;
66 |
67 | extern aw_x264_context *alloc_aw_x264_context(aw_x264_config);
68 | extern void free_aw_x264_context(aw_x264_context **);
69 |
70 | //Encode one frame of data
71 | extern void aw_encode_yuv_frame_2_x264(aw_x264_context *aw_ctx, int8_t *yuv_frame, int y_stride, int len);
72 |
73 | //Build the sps/pps data
74 | extern aw_data *aw_create_sps_pps_data(uint8_t *sps_bytes, uint32_t sps_len, uint8_t *pps_bytes, uint32_t pps_len);
75 |
76 | #endif /* aw_yuv2h264_h */
77 |
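For completeness, a sketch of driving the raw x264 wrapper without the flv layer; the context accumulates output in encoded_h264_data, and all configuration values here are illustrative:

    #include "aw_x264.h"
    #include "aw_utils.h"

    void x264_context_demo(int8_t *yuv_frame, int len) {
        aw_x264_config config;
        config.width   = 540;
        config.height  = 960;
        config.fps     = 20;
        config.bitrate = 1000000;
        config.b_frame_count = 0;
        config.input_data_format = X264_CSP_I420;
        config.videoMaxKeyFrameInterval = 60;
        config.y_stride  = aw_stride(config.width);
        config.uv_stride = aw_stride(config.width / 2);

        aw_x264_context *ctx = alloc_aw_x264_context(config);

        /* Encode one frame; sps/pps end up in ctx->sps_pps_data,
           the frame's NAL data in ctx->encoded_h264_data */
        aw_encode_yuv_frame_2_x264(ctx, yuv_frame, config.y_stride, len);

        free_aw_x264_context(&ctx);
    }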
--------------------------------------------------------------------------------
/clibs/libaw/pushStream/rtmp/aw_streamer.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | /*
4 | RTMP stream control: opening/closing the stream and writing flv audio/video data.
5 | Encoded flv data is sent out through this module, after which clients can play the stream.
6 | */
7 |
8 | #ifndef aw_streamer_h
9 | #define aw_streamer_h
10 |
11 | #include
12 | #include "aw_all.h"
13 |
14 | //Singleton
15 | //Open the stream
16 | extern int8_t aw_streamer_open(const char *rtmp_url, aw_rtmp_state_changed_cb state_changed_cb);
17 | //Close the stream
18 | extern void aw_streamer_close();
19 |
20 | //Whether streaming is in progress
21 | extern int8_t aw_streamer_is_streaming();
22 |
23 | //Send a video flv tag
24 | extern void aw_streamer_send_video_data(aw_flv_video_tag *video_tag);
25 | //Send an audio flv tag
26 | extern void aw_streamer_send_audio_data(aw_flv_audio_tag *audio_tag);
27 |
28 | //Send sps/pps
29 | extern void aw_streamer_send_video_sps_pps_tag(aw_flv_video_tag *sps_pps_tag);
30 | //Send the audio specific config
31 | extern void aw_streamer_send_audio_specific_config_tag(aw_flv_audio_tag *asc_tag);
32 |
33 | #endif /* aw_streamer_h */
34 |
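Putting it together, a minimal push sketch based on the declarations above. The rtmp URL is a placeholder, aw_rtmp_state_changed_cb is defined in aw_rtmp.h (not shown here) so NULL is passed for the state callback, and treating a non-zero return from aw_streamer_open as success is an assumption:

    #include "aw_streamer.h"

    void stream_demo(aw_flv_video_tag *sps_pps, aw_flv_audio_tag *asc,
                     aw_flv_video_tag *video_tag, aw_flv_audio_tag *audio_tag) {
        /* Placeholder URL; state-change callback omitted in this sketch */
        int8_t ok = aw_streamer_open("rtmp://example.com/live/stream", NULL);
        if (!ok) {   /* assumed: non-zero means the stream opened */
            return;
        }

        if (aw_streamer_is_streaming()) {
            /* Header tags first ... */
            aw_streamer_send_video_sps_pps_tag(sps_pps);
            aw_streamer_send_audio_specific_config_tag(asc);

            /* ... then the encoded frames, interleaved by timestamp */
            aw_streamer_send_video_data(video_tag);
            aw_streamer_send_audio_data(audio_tag);
        }

        aw_streamer_close();
    }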
--------------------------------------------------------------------------------