From 4c6506c38baf5c12758b4d9199cbb5337238c2d1 Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 12:44:22 +0700 Subject: [PATCH 1/9] feat(virtual-background): implement firebase ml selfie for split persons from original frame --- android/build.gradle | 11 +- android/local.properties | 4 +- .../cloudwebrtc/webrtc/GetUserMediaImpl.java | 251 ++++++++++++++++++ 3 files changed, 262 insertions(+), 4 deletions(-) diff --git a/android/build.gradle b/android/build.gradle index 692fc79a..1ff60952 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -31,7 +31,7 @@ android { compileSdkVersion 31 defaultConfig { - minSdkVersion 21 + minSdkVersion 23 testInstrumentationRunner 'androidx.test.runner.AndroidJUnitRunner' consumerProguardFiles 'proguard-rules.pro' } @@ -51,9 +51,16 @@ android { } dependencies { - implementation 'io.github.webrtc-sdk:android:114.5735.02' + implementation 'io.github.webrtc-sdk:android:114.5735.02' implementation 'com.twilio:audioswitch:1.1.8' implementation 'androidx.annotation:annotation:1.1.0' implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" + + // ML Kit + implementation 'com.google.mlkit:segmentation-selfie:16.0.0-beta3' + + // Libyuv + implementation "io.github.crow-misia.libyuv:libyuv-android:0.28.0" + implementation 'androidx.camera:camera-core:1.0.2' // implementation files('libwebrtc.aar') } diff --git a/android/local.properties b/android/local.properties index 46a8ab13..b18a9d80 100644 --- a/android/local.properties +++ b/android/local.properties @@ -4,5 +4,5 @@ # Location of the SDK. This is only used by Gradle. # For customization when using a Version Control System, please read the # header note. -#Sat May 20 23:50:57 ICT 2023 -sdk.dir=/home/lambiengcode/Android/Sdk +#Tue Jul 18 10:35:26 ICT 2023 +sdk.dir=/Users/lambiengcode/Library/Android/sdk diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java index 050f8afd..5fbfacf0 100755 --- a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java +++ b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java @@ -8,6 +8,8 @@ import android.content.Context; import android.content.Intent; import android.content.pm.PackageManager; +import android.graphics.Bitmap; +import android.graphics.ImageFormat; import android.hardware.Camera; import android.hardware.Camera.Parameters; import android.hardware.camera2.CameraAccessException; @@ -33,6 +35,7 @@ import android.view.Surface; import android.view.WindowManager; +import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.RequiresApi; @@ -48,6 +51,14 @@ import com.cloudwebrtc.webrtc.utils.MediaConstraintsUtils; import com.cloudwebrtc.webrtc.utils.ObjectType; import com.cloudwebrtc.webrtc.utils.PermissionUtils; +import com.google.android.gms.tasks.OnFailureListener; +import com.google.android.gms.tasks.OnSuccessListener; +import com.google.mlkit.common.MlKitException; +import com.google.mlkit.vision.common.InputImage; +import com.google.mlkit.vision.segmentation.Segmentation; +import com.google.mlkit.vision.segmentation.SegmentationMask; +import com.google.mlkit.vision.segmentation.Segmenter; +import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions; import org.webrtc.AudioSource; import org.webrtc.AudioTrack; @@ -58,18 +69,24 @@ import org.webrtc.CameraEnumerationAndroid.CaptureFormat; import org.webrtc.CameraEnumerator; import 
org.webrtc.CameraVideoCapturer; +import org.webrtc.JavaI420Buffer; import org.webrtc.MediaConstraints; import org.webrtc.MediaStream; import org.webrtc.MediaStreamTrack; import org.webrtc.PeerConnectionFactory; import org.webrtc.SurfaceTextureHelper; import org.webrtc.VideoCapturer; +import org.webrtc.VideoFrame; +import org.webrtc.VideoProcessor; +import org.webrtc.VideoSink; import org.webrtc.VideoSource; import org.webrtc.VideoTrack; +import org.webrtc.YuvHelper; import org.webrtc.audio.JavaAudioDeviceModule; import java.io.File; import java.lang.reflect.Field; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -77,6 +94,14 @@ import io.flutter.plugin.common.MethodChannel.Result; +import android.graphics.Bitmap; +import android.graphics.BitmapFactory; +import android.graphics.Canvas; +import android.graphics.PorterDuff; +import android.media.Image; +import android.util.Log; +import androidx.camera.core.ImageProxy; + /** * The implementation of {@code getUserMedia} extracted into a separate file in order to reduce * complexity and to (somewhat) separate concerns. @@ -112,6 +137,14 @@ class GetUserMediaImpl { private final SparseArray mediaRecorders = new SparseArray<>(); private AudioDeviceInfo preferredInput = null; + private final SelfieSegmenterOptions segmentOptions = new SelfieSegmenterOptions.Builder() + .setDetectorMode(SelfieSegmenterOptions.SINGLE_IMAGE_MODE) + .build(); + private final Segmenter segmenter = Segmentation.getClient(segmentOptions); + + private VideoSource vbVideoSource = null; + private VideoSink vbVideoSink = null; + public void screenRequestPermissions(ResultReceiver resultReceiver) { final Activity activity = stateProvider.getActivity(); if (activity == null) { @@ -739,6 +772,9 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi PeerConnectionFactory pcFactory = stateProvider.getPeerConnectionFactory(); VideoSource videoSource = pcFactory.createVideoSource(false); + + vbVideoSource = videoSource; + String threadName = Thread.currentThread().getName() + "_texture_camera_thread"; SurfaceTextureHelper surfaceTextureHelper = SurfaceTextureHelper.create(threadName, EglUtils.getRootEglBaseContext()); @@ -802,6 +838,221 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi return trackParams; } + void setVirtualBackground() { + vbVideoSource.setVideoProcessor(new VideoProcessor() { + @Override + public void onCapturerStarted(boolean success) { + // Xử lý khi bắt đầu capture video + } + + @Override + public void onCapturerStopped() { + // Xử lý khi dừng capture video + } + + @Override + public void onFrameCaptured(VideoFrame frame) { + // Chuyển đổi frame thành bitmap + Bitmap bitmap = videoFrameToBitmap(frame); + + // Xử lý segment với bitmap + processSegmentation(bitmap); + } + + @Override + public void setSink(VideoSink sink) { + // Lưu sink để gửi frame đã được cập nhật trở lại WebRTC + // Sink sẽ được sử dụng sau khi xử lý segment + vbVideoSink = sink; + } + }); + } + + public Bitmap videoFrameToBitmap(VideoFrame videoFrame) { + VideoFrame.Buffer buffer = videoFrame.getBuffer(); + int width = buffer.getWidth(); + int height = buffer.getHeight(); + + if (buffer instanceof VideoFrame.TextureBuffer) { + // Không hỗ trợ trực tiếp chuyển đổi từ TextureBuffer sang Bitmap + return null; + } else if (buffer instanceof VideoFrame.I420Buffer) { + VideoFrame.I420Buffer i420Buffer = (VideoFrame.I420Buffer) buffer; + + int ySize = width * height; + int 
uvSize = width * height / 4; + + ByteBuffer dataY = i420Buffer.getDataY(); + ByteBuffer dataU = i420Buffer.getDataU(); + ByteBuffer dataV = i420Buffer.getDataV(); + + byte[] dataYArray = new byte[ySize]; + byte[] dataUArray = new byte[uvSize]; + byte[] dataVArray = new byte[uvSize]; + + dataY.get(dataYArray); + dataU.get(dataUArray); + dataV.get(dataVArray); + + // Chuyển đổi từ YUV sang RGB + int[] rgbData = convertYUVtoRGB(dataYArray, dataUArray, dataVArray, width, height); + + // Tạo Bitmap từ dữ liệu RGB + Bitmap bitmap = Bitmap.createBitmap(rgbData, width, height, Bitmap.Config.ARGB_8888); + + return bitmap; + } + + return null; + } + + private int[] convertYUVtoRGB(byte[] yData, byte[] uData, byte[] vData, int width, int height) { + int[] rgbData = new int[width * height]; + int uvIndex = 0; + int yOffset = 0; + + for (int y = 0; y < height; y++) { + int uvRowStart = uvIndex; + int uvRowOffset = y >> 1; + + for (int x = 0; x < width; x++) { + int yIndex = yOffset + x; + int uvIndexOffset = uvRowStart + (x >> 1); + + int yValue = yData[yIndex] & 0xFF; + int uValue = uData[uvIndexOffset] & 0xFF; + int vValue = vData[uvIndexOffset] & 0xFF; + + int r = yValue + (int) (1.370705f * (vValue - 128)); + int g = yValue - (int) (0.698001f * (vValue - 128)) - (int) (0.337633f * (uValue - 128)); + int b = yValue + (int) (1.732446f * (uValue - 128)); + + r = Math.max(0, Math.min(255, r)); + g = Math.max(0, Math.min(255, g)); + b = Math.max(0, Math.min(255, b)); + + int pixelColor = 0xFF000000 | (r << 16) | (g << 8) | b; + rgbData[y * width + x] = pixelColor; + } + + if (y % 2 == 1) { + uvIndex = uvRowStart + width / 2; + yOffset += width; + } + } + + return rgbData; + } + + private void processSegmentation(Bitmap bitmap) { + // Tạo InputImage từ bitmap + InputImage inputImage = InputImage.fromBitmap(bitmap, 0); + + // Xử lý phân đoạn + segmenter.process(inputImage) + .addOnSuccessListener(new OnSuccessListener() { + @Override + public void onSuccess(SegmentationMask segmentationMask) { + // Xử lý khi phân đoạn thành công + ByteBuffer mask = segmentationMask.getBuffer(); + int maskWidth = segmentationMask.getWidth(); + int maskHeight = segmentationMask.getHeight(); + mask.rewind(); + + // Chuyển đổi buffer thành mảng màu + int[] colors = maskColorsFromByteBuffer(mask, maskWidth, maskHeight); + + // Tạo bitmap đã được phân đoạn từ mảng màu + Bitmap segmentedBitmap = createBitmapFromColors(colors, maskWidth, maskHeight); + + // Vẽ ảnh nền đã phân đoạn lên canvas + Bitmap outputBitmap = drawSegmentedBackground(segmentedBitmap, segmentedBitmap); + + // Tạo VideoFrame mới từ bitmap đã xử lý + int frameRotation = 180; // Frame rotation angle (customize as needed) + long frameTimestamp = System.nanoTime(); // Frame timestamp (customize as needed) + VideoFrame outputVideoFrame = createVideoFrame(outputBitmap, frameRotation, frameTimestamp); + + // Gửi frame đã được cập nhật trở lại WebRTC + vbVideoSink.onFrame(outputVideoFrame); + } + }) + .addOnFailureListener(new OnFailureListener() { + @Override + public void onFailure(@NonNull Exception exception) { + // Xử lý khi phân đoạn thất bại + Log.e(TAG, "Segmentation failed: " + exception.getMessage()); + } + }); + } + + private Bitmap drawSegmentedBackground(Bitmap segmentedBitmap, Bitmap backgroundBitmap) { + Bitmap outputBitmap = Bitmap.createBitmap( + segmentedBitmap.getWidth(), segmentedBitmap.getHeight(), Bitmap.Config.ARGB_8888 + ); + Canvas canvas = new Canvas(outputBitmap); + + // Vẽ ảnh nền đã phân đoạn lên canvas + 
canvas.drawBitmap(backgroundBitmap, 0, 0, null); + canvas.drawBitmap(segmentedBitmap, 0, 0, null); + + return outputBitmap; + } + + private VideoFrame createVideoFrame(Bitmap bitmap, int rotation, long timestampNs) { + ByteBuffer buffer = ByteBuffer.allocate(bitmap.getByteCount()); + bitmap.copyPixelsToBuffer(buffer); + byte[] data = buffer.array(); + + int width = bitmap.getWidth(); + int height = bitmap.getHeight(); + int strideY = width; + int strideU = (width + 1) / 2; + int strideV = (width + 1) / 2; + + byte[] dataU = new byte[width * height / 4]; + byte[] dataV = new byte[width * height / 4]; + for (int i = 0; i < width * height / 4; i++) { + dataU[i] = data[width * height + i]; + dataV[i] = data[width * height + width * height / 4 + i]; + } + + Runnable releaseCallback = () -> { + // Thực hiện các thao tác giải phóng tài nguyên liên quan tại đây (nếu có) + }; + + VideoFrame.I420Buffer i420Buffer = JavaI420Buffer.wrap( + width, + height, + ByteBuffer.wrap(data), + strideY, + ByteBuffer.wrap(dataU), + strideU, ByteBuffer.wrap(dataV), strideV, releaseCallback + ); + + return new VideoFrame(i420Buffer, rotation, timestampNs); + } + + + // Hàm chuyển đổi buffer thành mảng màu + private int[] maskColorsFromByteBuffer(ByteBuffer buffer, int width, int height) { + // Chuyển đổi từ ByteBuffer thành mảng màu, tùy thuộc vào định dạng màu + // của buffer. Đảm bảo bạn sử dụng đúng định dạng màu tương ứng với + // phân đoạn của ML Kit. + // Trong ví dụ này, chúng tôi giả định rằng buffer có định dạng ARGB_8888. + + // Ví dụ: chuyển đổi từ ByteBuffer thành mảng ARGB_8888 + int[] colors = new int[width * height]; + buffer.asIntBuffer().get(colors); + + return colors; + } + + // Hàm tạo bitmap từ mảng màu + private Bitmap createBitmapFromColors(int[] colors, int width, int height) { + return Bitmap.createBitmap(colors, width, height, Bitmap.Config.ARGB_8888); + } + void removeVideoCapturerSync(String id) { synchronized (mVideoCapturers) { VideoCapturerInfo info = mVideoCapturers.get(id); From 1652913d3cd0c60d4dfc1246da409dbaa1685571 Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 12:46:23 +0700 Subject: [PATCH 2/9] migration(android): Upgrade AGP dependency from 7.3.0 to 7.4.2 --- example/android/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/android/build.gradle b/example/android/build.gradle index f7eb7f63..ce647a43 100644 --- a/example/android/build.gradle +++ b/example/android/build.gradle @@ -6,7 +6,7 @@ buildscript { } dependencies { - classpath 'com.android.tools.build:gradle:7.3.0' + classpath 'com.android.tools.build:gradle:7.4.2' classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" } } From af067c6357ce4939f58e23a3a98df4bcdc5ea72c Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 13:21:16 +0700 Subject: [PATCH 3/9] fix: github actions --- .../src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java | 2 +- ios/flutter_webrtc.podspec | 2 +- pubspec.yaml | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java index 5fbfacf0..9b6b8f23 100755 --- a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java +++ b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java @@ -952,7 +952,7 @@ private void processSegmentation(Bitmap bitmap) { segmenter.process(inputImage) .addOnSuccessListener(new OnSuccessListener() { @Override - public void 
onSuccess(SegmentationMask segmentationMask) { + public void onSuccess(@NonNull SegmentationMask segmentationMask) { // Xử lý khi phân đoạn thành công ByteBuffer mask = segmentationMask.getBuffer(); int maskWidth = segmentationMask.getWidth(); diff --git a/ios/flutter_webrtc.podspec b/ios/flutter_webrtc.podspec index 61771f74..f9d935e7 100644 --- a/ios/flutter_webrtc.podspec +++ b/ios/flutter_webrtc.podspec @@ -16,6 +16,6 @@ A new flutter plugin project. s.public_header_files = 'Classes/**/*.h' s.dependency 'Flutter' s.dependency 'WebRTC-lbc', '116.5845.02' - s.ios.deployment_target = '10.0' + s.ios.deployment_target = '11.0' s.static_framework = true end diff --git a/pubspec.yaml b/pubspec.yaml index bb0e2a28..9c8452d3 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -2,6 +2,7 @@ name: flutter_webrtc description: Flutter WebRTC plugin for iOS/Android/Destkop/Web, based on GoogleWebRTC. version: 0.9.36+2 homepage: https://github.com/cloudwebrtc/flutter-webrtc +publish_to: none environment: sdk: '>=2.12.0 <4.0.0' flutter: '>=1.22.0' From 1692095214acde2419c47f2282efa738f56ca3ac Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 13:44:26 +0700 Subject: [PATCH 4/9] update . --- example/android/app/build.gradle | 2 +- example/ios/Podfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/example/android/app/build.gradle b/example/android/app/build.gradle index 459b756e..5c8a320d 100644 --- a/example/android/app/build.gradle +++ b/example/android/app/build.gradle @@ -39,7 +39,7 @@ android { applicationId "com.cloudwebrtc.flutterflutterexample.flutter_webrtc_example" // You can update the following values to match your application needs. // For more information, see: https://docs.flutter.dev/deployment/android#reviewing-the-gradle-build-configuration. - minSdkVersion 21 + minSdkVersion 23 targetSdkVersion 31 versionCode flutterVersionCode.toInteger() versionName flutterVersionName diff --git a/example/ios/Podfile b/example/ios/Podfile index ec43b513..9dd92026 100644 --- a/example/ios/Podfile +++ b/example/ios/Podfile @@ -1,5 +1,5 @@ # Uncomment this line to define a global platform for your project -# platform :ios, '11.0' +platform :ios, '11.0' # CocoaPods analytics sends network stats synchronously affecting flutter build latency. ENV['COCOAPODS_DISABLE_STATS'] = 'true' From 8b9c7888c9f271861f4f339f4e9237ed13fb9e4f Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 13:51:52 +0700 Subject: [PATCH 5/9] update . --- example/ios/Podfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/ios/Podfile b/example/ios/Podfile index 9dd92026..06630d1a 100644 --- a/example/ios/Podfile +++ b/example/ios/Podfile @@ -1,5 +1,5 @@ # Uncomment this line to define a global platform for your project -platform :ios, '11.0' +platform :ios, '12.0' # CocoaPods analytics sends network stats synchronously affecting flutter build latency. ENV['COCOAPODS_DISABLE_STATS'] = 'true' From aa2870e391ac7cf94bf1633ff7be394ec872a2bb Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 14:05:16 +0700 Subject: [PATCH 6/9] chore: Bump version 0.9.36+3 --- pubspec.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubspec.yaml b/pubspec.yaml index 9c8452d3..cfba6be4 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -1,6 +1,6 @@ name: flutter_webrtc description: Flutter WebRTC plugin for iOS/Android/Destkop/Web, based on GoogleWebRTC. 
-version: 0.9.36+2 +version: 0.9.36+3 homepage: https://github.com/cloudwebrtc/flutter-webrtc publish_to: none environment: From 306ba393683533cecd465d457ede4adf7dbd4b09 Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 15:12:14 +0700 Subject: [PATCH 7/9] feat: execute virtual bg on Android --- .../src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java index 9b6b8f23..02dca418 100755 --- a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java +++ b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java @@ -775,6 +775,8 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi vbVideoSource = videoSource; + setVirtualBackground(); + String threadName = Thread.currentThread().getName() + "_texture_camera_thread"; SurfaceTextureHelper surfaceTextureHelper = SurfaceTextureHelper.create(threadName, EglUtils.getRootEglBaseContext()); From ab48793dfa4cf79a5ddbccc50e57ade96822307c Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Tue, 18 Jul 2023 15:12:40 +0700 Subject: [PATCH 8/9] chore: Bump version 0.9.36+4 --- pubspec.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubspec.yaml b/pubspec.yaml index cfba6be4..b30595c4 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -1,6 +1,6 @@ name: flutter_webrtc description: Flutter WebRTC plugin for iOS/Android/Destkop/Web, based on GoogleWebRTC. -version: 0.9.36+3 +version: 0.9.36+4 homepage: https://github.com/cloudwebrtc/flutter-webrtc publish_to: none environment: From 406a6e9431dce6136d152a8d4a74c852aca993c2 Mon Sep 17 00:00:00 2001 From: lambiengcode Date: Wed, 19 Jul 2023 22:56:31 +0700 Subject: [PATCH 9/9] feat: virtual bg android done --- android/build.gradle | 9 +- android/proguard-rules.pro | 2 +- .../webrtc/FlutterRTCVirtualBackground.kt | 456 ++++++++++++++++++ .../cloudwebrtc/webrtc/GetUserMediaImpl.java | 260 +--------- .../webrtc/MethodCallHandlerImpl.java | 27 +- lib/src/helper.dart | 15 + 6 files changed, 505 insertions(+), 264 deletions(-) create mode 100644 android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt diff --git a/android/build.gradle b/android/build.gradle index 1ff60952..93653845 100644 --- a/android/build.gradle +++ b/android/build.gradle @@ -53,14 +53,11 @@ android { dependencies { implementation 'io.github.webrtc-sdk:android:114.5735.02' implementation 'com.twilio:audioswitch:1.1.8' - implementation 'androidx.annotation:annotation:1.1.0' + implementation 'androidx.annotation:annotation:1.6.0' implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk7:$kotlin_version" // ML Kit - implementation 'com.google.mlkit:segmentation-selfie:16.0.0-beta3' - - // Libyuv - implementation "io.github.crow-misia.libyuv:libyuv-android:0.28.0" - implementation 'androidx.camera:camera-core:1.0.2' + implementation 'com.google.mlkit:segmentation-selfie:16.0.0-beta4' + implementation 'androidx.camera:camera-core:1.2.3' // implementation files('libwebrtc.aar') } diff --git a/android/proguard-rules.pro b/android/proguard-rules.pro index 6ce98961..54f34ec6 100644 --- a/android/proguard-rules.pro +++ b/android/proguard-rules.pro @@ -1,3 +1,3 @@ # Flutter WebRTC -keep class com.cloudwebrtc.webrtc.** { *; } --keep class org.webrtc.** { *; } +-keep class org.webrtc.** { *; } \ No newline at end of file diff --git 
a/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt b/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt new file mode 100644 index 00000000..f4be51bf --- /dev/null +++ b/android/src/main/java/com/cloudwebrtc/webrtc/FlutterRTCVirtualBackground.kt @@ -0,0 +1,456 @@ +package com.cloudwebrtc.webrtc + +import android.graphics.Bitmap +import android.graphics.BitmapFactory +import android.graphics.Canvas +import android.graphics.Color +import android.graphics.ImageFormat +import android.graphics.Matrix +import android.graphics.Rect +import android.graphics.YuvImage +import android.opengl.GLES20 +import android.opengl.GLUtils +import android.util.Log +import com.google.android.gms.tasks.Task +import com.google.mlkit.vision.common.InputImage +import com.google.mlkit.vision.segmentation.Segmentation +import com.google.mlkit.vision.segmentation.SegmentationMask +import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions +import org.webrtc.EglBase +import org.webrtc.SurfaceTextureHelper +import org.webrtc.TextureBufferImpl +import org.webrtc.VideoFrame +import org.webrtc.VideoProcessor +import org.webrtc.VideoSink +import org.webrtc.VideoSource +import org.webrtc.YuvConverter +import org.webrtc.YuvHelper +import java.io.ByteArrayOutputStream +import java.nio.ByteBuffer +import java.util.Arrays +import kotlin.math.max + +class FlutterRTCVirtualBackground { + val TAG = FlutterWebRTCPlugin.TAG + + private var videoSource: VideoSource? = null + private var textureHelper: SurfaceTextureHelper? = null + private var backgroundBitmap: Bitmap? = null + private var expectConfidence = 0.7 + private val segmentOptions = SelfieSegmenterOptions.Builder() + .setDetectorMode(SelfieSegmenterOptions.STREAM_MODE) + .enableRawSizeMask() + .setStreamModeSmoothingRatio(1.0f) + .build() + private val segmenter = Segmentation.getClient(segmentOptions) + + // MARK: Public functions + + /** + * Initialize the VirtualBackgroundManager with the given VideoSource. + * + * @param videoSource The VideoSource to be used for video capturing. + */ + fun initialize(videoSource: VideoSource) { + this.videoSource = videoSource + setVirtualBackground() + } + + /** + * Dispose of the VirtualBackgroundManager, clearing its references and configurations. + */ + fun dispose() { + this.videoSource = null + this.expectConfidence = 0.7 + setBackgroundIsNull() + } + + fun setBackgroundIsNull() { + this.backgroundBitmap = null + } + + /** + * Configure the virtual background by setting the background bitmap and the desired confidence level. + * + * @param bgBitmap The background bitmap to be used for virtual background replacement. + * @param confidence The confidence level (0 to 1) for selecting the foreground in the segmentation mask. + */ + fun configurationVirtualBackground(bgBitmap: Bitmap, confidence: Double) { + backgroundBitmap = bgBitmap + expectConfidence = confidence + } + + /** + * Set up the virtual background processing by attaching a VideoProcessor to the VideoSource. + * The VideoProcessor will handle capturing video frames, performing segmentation, and replacing the background. + */ + private fun setVirtualBackground() { + // Create an instance of EglBase + val eglBase = EglBase.create() + textureHelper = SurfaceTextureHelper.create("SurfaceTextureThread", eglBase.eglBaseContext) + + // Attach a VideoProcessor to the VideoSource to process captured video frames + videoSource!!.setVideoProcessor(object : VideoProcessor { + private var sink: VideoSink? 
= null + + override fun onCapturerStarted(success: Boolean) { + // Handle video capture start event + } + + override fun onCapturerStopped() { + // Handle video capture stop event + } + + override fun onFrameCaptured(frame: VideoFrame) { + if (sink != null) { + if (backgroundBitmap == null) { + // If no background is set, pass the original frame to the sink + sink!!.onFrame(frame) + } else { + // Otherwise, perform segmentation on the captured frame and replace the background + val inputFrameBitmap: Bitmap? = videoFrameToBitmap(frame) + if (inputFrameBitmap != null) { + runSegmentationInBackground(inputFrameBitmap, frame, sink!!) + } else { + Log.d(TAG, "Convert video frame to bitmap failure") + } + } + } + } + + override fun setSink(sink: VideoSink?) { + // Store the VideoSink to send the processed frame back to WebRTC + // The sink will be used after segmentation processing + this.sink = sink + } + }) + } + + /** + * Perform segmentation on the input bitmap in the background thread. + * After segmentation, the background is replaced with the configured virtual background. + * + * @param inputFrameBitmap The input frame bitmap to be segmented. + * @param frame The original VideoFrame metadata for the input bitmap. + * @param sink The VideoSink to send the processed frame back to WebRTC. + */ + private fun runSegmentationInBackground( + inputFrameBitmap: Bitmap, + frame: VideoFrame, + sink: VideoSink + ) { + Thread { + // Perform segmentation in the background thread + processSegmentation(inputFrameBitmap, frame, sink) + }.start() + } + + /** + * Convert a VideoFrame to a Bitmap for further processing. + * + * @param videoFrame The input VideoFrame to be converted. + * @return The corresponding Bitmap representation of the VideoFrame. + */ + private fun videoFrameToBitmap(videoFrame: VideoFrame): Bitmap? { + // Retain the VideoFrame to prevent it from being garbage collected + videoFrame.retain() + + // Convert the VideoFrame to I420 format + val buffer = videoFrame.buffer + val i420Buffer = buffer.toI420() + val y = i420Buffer!!.dataY + val u = i420Buffer.dataU + val v = i420Buffer.dataV + val width = i420Buffer.width + val height = i420Buffer.height + val strides = intArrayOf( + i420Buffer.strideY, + i420Buffer.strideU, + i420Buffer.strideV + ) + // Convert I420 format to NV12 format as required by YuvImage + val chromaWidth = (width + 1) / 2 + val chromaHeight = (height + 1) / 2 + val minSize = width * height + chromaWidth * chromaHeight * 2 + val yuvBuffer = ByteBuffer.allocateDirect(minSize) + YuvHelper.I420ToNV12( + y, + strides[0], + v, + strides[2], + u, + strides[1], + yuvBuffer, + width, + height + ) + // Remove leading 0 from the ByteBuffer + val cleanedArray = + Arrays.copyOfRange(yuvBuffer.array(), yuvBuffer.arrayOffset(), minSize) + val yuvImage = YuvImage( + cleanedArray, + ImageFormat.NV21, + width, + height, + null + ) + i420Buffer.release() + videoFrame.release() + + // Convert YuvImage to byte array + val outputStream = ByteArrayOutputStream() + yuvImage.compressToJpeg( + Rect(0, 0, yuvImage.width, yuvImage.height), + 100, + outputStream + ) + val jpegData = outputStream.toByteArray() + + // Convert byte array to Bitmap + return BitmapFactory.decodeByteArray(jpegData, 0, jpegData.size) + } + + /** + * Process the segmentation of the input bitmap using the AI segmenter. + * The resulting segmented bitmap is then combined with the provided background bitmap, + * and the final output frame is sent to the video sink. + * + * @param bitmap The input bitmap to be segmented. 
+     * @param original The original video frame for metadata reference (rotation, timestamp, etc.).
+     * @param sink The VideoSink to receive the processed video frame.
+     */
+    private fun processSegmentation(bitmap: Bitmap, original: VideoFrame, sink: VideoSink) {
+        // Create an InputImage from the input bitmap
+        val inputImage = InputImage.fromBitmap(bitmap, 0)
+
+        // Perform segmentation using the AI segmenter
+        val result = segmenter.process(inputImage)
+        result.addOnCompleteListener { task: Task<SegmentationMask> ->
+            if (task.isSuccessful) {
+                // Segmentation process successful
+                val segmentationMask = task.result
+                val mask = segmentationMask.buffer
+                val maskWidth = segmentationMask.width
+                val maskHeight = segmentationMask.height
+                mask.rewind()
+
+                // Convert the buffer to an array of colors
+                val colors = maskColorsFromByteBuffer(
+                    mask,
+                    maskWidth,
+                    maskHeight,
+                    bitmap,
+                    bitmap.width,
+                    bitmap.height
+                )
+
+                // Create a segmented bitmap from the array of colors
+                val segmentedBitmap =
+                    createBitmapFromColors(colors, bitmap.width, bitmap.height)
+
+
+                if (backgroundBitmap == null) {
+                    return@addOnCompleteListener
+                }
+
+                // Draw the segmented bitmap on top of the background
+                val outputBitmap =
+                    drawSegmentedBackground(segmentedBitmap, backgroundBitmap)
+
+                // Create a new VideoFrame from the processed bitmap
+                val yuvConverter = YuvConverter()
+                if (textureHelper != null && textureHelper!!.handler != null) {
+                    textureHelper!!.handler.post {
+                        val textures = IntArray(1)
+                        GLES20.glGenTextures(1, textures, 0)
+                        GLES20.glBindTexture(
+                            GLES20.GL_TEXTURE_2D,
+                            textures[0]
+                        )
+                        GLES20.glTexParameteri(
+                            GLES20.GL_TEXTURE_2D,
+                            GLES20.GL_TEXTURE_MIN_FILTER,
+                            GLES20.GL_NEAREST
+                        )
+                        GLES20.glTexParameteri(
+                            GLES20.GL_TEXTURE_2D,
+                            GLES20.GL_TEXTURE_MAG_FILTER,
+                            GLES20.GL_NEAREST
+                        )
+                        GLUtils.texImage2D(
+                            GLES20.GL_TEXTURE_2D,
+                            0,
+                            outputBitmap,
+                            0
+                        )
+                        val buffer = TextureBufferImpl(
+                            outputBitmap!!.width,
+                            outputBitmap.height,
+                            VideoFrame.TextureBuffer.Type.RGB,
+                            textures[0],
+                            Matrix(),
+                            textureHelper!!.handler,
+                            yuvConverter,
+                            null
+                        )
+                        val i420Buf = yuvConverter.convert(buffer)
+                        if (i420Buf != null) {
+                            val outputVideoFrame = VideoFrame(
+                                i420Buf,
+                                original.rotation,
+                                original.timestampNs
+                            )
+                            sink.onFrame(outputVideoFrame)
+                        }
+                    }
+                }
+            } else {
+                // Handle segmentation error
+                val error = task.exception
+                // Log error information
+                Log.d(TAG, "Segmentation error: " + error.toString())
+            }
+        }
+    }
+
+    /**
+     * Convert the mask buffer to an array of colors representing the segmented regions.
+     *
+     * @param mask The mask buffer obtained from the AI segmenter.
+     * @param maskWidth The width of the mask.
+     * @param maskHeight The height of the mask.
+     * @param originalBitmap The original input bitmap used for color extraction.
+     * @param scaledWidth The width of the scaled bitmap.
+     * @param scaledHeight The height of the scaled bitmap.
+     * @return An array of colors representing the segmented regions.
+ */ + private fun maskColorsFromByteBuffer( + mask: ByteBuffer, + maskWidth: Int, + maskHeight: Int, + originalBitmap: Bitmap, + scaledWidth: Int, + scaledHeight: Int + ): IntArray { + val colors = IntArray(scaledWidth * scaledHeight) + var count = 0 + val scaleX = scaledWidth.toFloat() / maskWidth + val scaleY = scaledHeight.toFloat() / maskHeight + for (y in 0 until scaledHeight) { + for (x in 0 until scaledWidth) { + val maskX: Int = (x / scaleX).toInt() + val maskY: Int = (y / scaleY).toInt() + if (maskX in 0 until maskWidth && maskY >= 0 && maskY < maskHeight) { + val position = (maskY * maskWidth + maskX) * 4 + mask.position(position) + + // Get the confidence of the (x,y) pixel in the mask being in the foreground. + val foregroundConfidence = mask.float + val pixelColor = originalBitmap.getPixel(x, y) + + // Extract the color channels from the original pixel + val alpha = Color.alpha(pixelColor) + val red = Color.red(pixelColor) + val green = Color.green(pixelColor) + val blue = Color.blue(pixelColor) + + // Calculate the new alpha and color for the foreground and background + var newAlpha: Int + var newRed: Int + var newGreen: Int + var newBlue: Int + if (foregroundConfidence >= expectConfidence) { + // Foreground uses color from the original bitmap + newAlpha = alpha + newRed = red + newGreen = green + newBlue = blue + } else { + // Background is black with alpha 0 + newAlpha = 0 + newRed = 0 + newGreen = 0 + newBlue = 0 + } + + // Create a new color with the adjusted alpha and RGB channels + val newColor = Color.argb(newAlpha, newRed, newGreen, newBlue) + colors[count] = newColor + } else { + // Pixels outside the original mask size are considered background (black with alpha 0) + colors[count] = Color.argb(0, 0, 0, 0) + } + count++ + } + } + return colors + } + + /** + * Draws the segmentedBitmap on top of the backgroundBitmap with the background resized and centered + * to fit the dimensions of the segmentedBitmap. The output is a new bitmap containing the combined + * result. + * + * @param segmentedBitmap The bitmap representing the segmented foreground with transparency. + * @param backgroundBitmap The bitmap representing the background image to be used as the base. + * @return The resulting bitmap with the segmented foreground overlaid on the background. + * Returns null if either of the input bitmaps is null. + */ + private fun drawSegmentedBackground( + segmentedBitmap: Bitmap?, + backgroundBitmap: Bitmap? + ): Bitmap? 
{ + if (segmentedBitmap == null || backgroundBitmap == null) { + // Handle invalid bitmaps + return null + } + + val segmentedWidth = segmentedBitmap.width + val segmentedHeight = segmentedBitmap.height + + // Create a new bitmap with dimensions matching the segmentedBitmap + val outputBitmap = + Bitmap.createBitmap(segmentedWidth, segmentedHeight, Bitmap.Config.ARGB_8888) + + // Create a canvas to draw on the outputBitmap + val canvas = Canvas(outputBitmap) + + // Calculate the scale factor for the backgroundBitmap to be larger or equal to the segmentedBitmap + val scaleX = segmentedWidth.toFloat() / backgroundBitmap.width + val scaleY = segmentedHeight.toFloat() / backgroundBitmap.height + val scale = max(scaleX, scaleY) + + // Calculate the new dimensions of the backgroundBitmap after scaling + val newBackgroundWidth = (backgroundBitmap.width * scale).toInt() + val newBackgroundHeight = (backgroundBitmap.height * scale).toInt() + + // Calculate the offset to center the backgroundBitmap in the outputBitmap + val offsetX = (segmentedWidth - newBackgroundWidth) / 2 + val offsetY = (segmentedHeight - newBackgroundHeight) / 2 + + // Create a transformation matrix to scale and center the backgroundBitmap + val matrix = Matrix() + matrix.postScale(scale, scale) + matrix.postTranslate(offsetX.toFloat(), offsetY.toFloat()) + + // Draw the backgroundBitmap on the canvas with the specified scale and centering + canvas.drawBitmap(backgroundBitmap, matrix, null) + + // Draw the segmentedBitmap on the canvas + canvas.drawBitmap(segmentedBitmap, 0f, 0f, null) + + return outputBitmap + } + + /** + * Creates a bitmap from an array of colors with the specified width and height. + * + * @param colors The array of colors representing the pixel values of the bitmap. + * @param width The width of the bitmap. + * @param height The height of the bitmap. + * @return The resulting bitmap created from the array of colors. 
+ */ + private fun createBitmapFromColors(colors: IntArray, width: Int, height: Int): Bitmap { + return Bitmap.createBitmap(colors, width, height, Bitmap.Config.ARGB_8888) + } +} \ No newline at end of file diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java index 02dca418..003b7c57 100755 --- a/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java +++ b/android/src/main/java/com/cloudwebrtc/webrtc/GetUserMediaImpl.java @@ -8,8 +8,6 @@ import android.content.Context; import android.content.Intent; import android.content.pm.PackageManager; -import android.graphics.Bitmap; -import android.graphics.ImageFormat; import android.hardware.Camera; import android.hardware.Camera.Parameters; import android.hardware.camera2.CameraAccessException; @@ -35,7 +33,6 @@ import android.view.Surface; import android.view.WindowManager; -import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.annotation.RequiresApi; @@ -51,14 +48,6 @@ import com.cloudwebrtc.webrtc.utils.MediaConstraintsUtils; import com.cloudwebrtc.webrtc.utils.ObjectType; import com.cloudwebrtc.webrtc.utils.PermissionUtils; -import com.google.android.gms.tasks.OnFailureListener; -import com.google.android.gms.tasks.OnSuccessListener; -import com.google.mlkit.common.MlKitException; -import com.google.mlkit.vision.common.InputImage; -import com.google.mlkit.vision.segmentation.Segmentation; -import com.google.mlkit.vision.segmentation.SegmentationMask; -import com.google.mlkit.vision.segmentation.Segmenter; -import com.google.mlkit.vision.segmentation.selfie.SelfieSegmenterOptions; import org.webrtc.AudioSource; import org.webrtc.AudioTrack; @@ -69,24 +58,18 @@ import org.webrtc.CameraEnumerationAndroid.CaptureFormat; import org.webrtc.CameraEnumerator; import org.webrtc.CameraVideoCapturer; -import org.webrtc.JavaI420Buffer; import org.webrtc.MediaConstraints; import org.webrtc.MediaStream; import org.webrtc.MediaStreamTrack; import org.webrtc.PeerConnectionFactory; import org.webrtc.SurfaceTextureHelper; import org.webrtc.VideoCapturer; -import org.webrtc.VideoFrame; -import org.webrtc.VideoProcessor; -import org.webrtc.VideoSink; import org.webrtc.VideoSource; import org.webrtc.VideoTrack; -import org.webrtc.YuvHelper; import org.webrtc.audio.JavaAudioDeviceModule; import java.io.File; import java.lang.reflect.Field; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -94,14 +77,6 @@ import io.flutter.plugin.common.MethodChannel.Result; -import android.graphics.Bitmap; -import android.graphics.BitmapFactory; -import android.graphics.Canvas; -import android.graphics.PorterDuff; -import android.media.Image; -import android.util.Log; -import androidx.camera.core.ImageProxy; - /** * The implementation of {@code getUserMedia} extracted into a separate file in order to reduce * complexity and to (somewhat) separate concerns. 
@@ -137,13 +112,7 @@ class GetUserMediaImpl { private final SparseArray mediaRecorders = new SparseArray<>(); private AudioDeviceInfo preferredInput = null; - private final SelfieSegmenterOptions segmentOptions = new SelfieSegmenterOptions.Builder() - .setDetectorMode(SelfieSegmenterOptions.SINGLE_IMAGE_MODE) - .build(); - private final Segmenter segmenter = Segmentation.getClient(segmentOptions); - - private VideoSource vbVideoSource = null; - private VideoSink vbVideoSink = null; + private FlutterRTCVirtualBackground flutterRTCVirtualBackground = null; public void screenRequestPermissions(ResultReceiver resultReceiver) { final Activity activity = stateProvider.getActivity(); @@ -239,9 +208,10 @@ public void onResume() { } } - GetUserMediaImpl(StateProvider stateProvider, Context applicationContext) { + GetUserMediaImpl(StateProvider stateProvider, Context applicationContext, FlutterRTCVirtualBackground flutterRTCVirtualBackground) { this.stateProvider = stateProvider; this.applicationContext = applicationContext; + this.flutterRTCVirtualBackground = flutterRTCVirtualBackground; } static private void resultError(String method, String error, Result result) { @@ -542,7 +512,7 @@ protected void onReceiveResult(int requestCode, Bundle resultData) { @Override public void onStop() { super.onStop(); - // After Huawei P30 and Android 10 version test, the onstop method is called, which will not affect the next process, + // After Huawei P30 and Android 10 version test, the onstop method is called, which will not affect the next process, // and there is no need to call the resulterror method //resultError("MediaProjection.Callback()", "User revoked permission to capture the screen.", result); } @@ -773,9 +743,7 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi PeerConnectionFactory pcFactory = stateProvider.getPeerConnectionFactory(); VideoSource videoSource = pcFactory.createVideoSource(false); - vbVideoSource = videoSource; - - setVirtualBackground(); + flutterRTCVirtualBackground.initialize(videoSource); String threadName = Thread.currentThread().getName() + "_texture_camera_thread"; SurfaceTextureHelper surfaceTextureHelper = @@ -840,223 +808,11 @@ private ConstraintsMap getUserVideo(ConstraintsMap constraints, MediaStream medi return trackParams; } - void setVirtualBackground() { - vbVideoSource.setVideoProcessor(new VideoProcessor() { - @Override - public void onCapturerStarted(boolean success) { - // Xử lý khi bắt đầu capture video - } - - @Override - public void onCapturerStopped() { - // Xử lý khi dừng capture video - } - - @Override - public void onFrameCaptured(VideoFrame frame) { - // Chuyển đổi frame thành bitmap - Bitmap bitmap = videoFrameToBitmap(frame); - - // Xử lý segment với bitmap - processSegmentation(bitmap); - } - - @Override - public void setSink(VideoSink sink) { - // Lưu sink để gửi frame đã được cập nhật trở lại WebRTC - // Sink sẽ được sử dụng sau khi xử lý segment - vbVideoSink = sink; - } - }); - } - - public Bitmap videoFrameToBitmap(VideoFrame videoFrame) { - VideoFrame.Buffer buffer = videoFrame.getBuffer(); - int width = buffer.getWidth(); - int height = buffer.getHeight(); - - if (buffer instanceof VideoFrame.TextureBuffer) { - // Không hỗ trợ trực tiếp chuyển đổi từ TextureBuffer sang Bitmap - return null; - } else if (buffer instanceof VideoFrame.I420Buffer) { - VideoFrame.I420Buffer i420Buffer = (VideoFrame.I420Buffer) buffer; - - int ySize = width * height; - int uvSize = width * height / 4; - - ByteBuffer dataY = 
i420Buffer.getDataY(); - ByteBuffer dataU = i420Buffer.getDataU(); - ByteBuffer dataV = i420Buffer.getDataV(); - - byte[] dataYArray = new byte[ySize]; - byte[] dataUArray = new byte[uvSize]; - byte[] dataVArray = new byte[uvSize]; - - dataY.get(dataYArray); - dataU.get(dataUArray); - dataV.get(dataVArray); - - // Chuyển đổi từ YUV sang RGB - int[] rgbData = convertYUVtoRGB(dataYArray, dataUArray, dataVArray, width, height); - - // Tạo Bitmap từ dữ liệu RGB - Bitmap bitmap = Bitmap.createBitmap(rgbData, width, height, Bitmap.Config.ARGB_8888); - - return bitmap; - } - - return null; - } - - private int[] convertYUVtoRGB(byte[] yData, byte[] uData, byte[] vData, int width, int height) { - int[] rgbData = new int[width * height]; - int uvIndex = 0; - int yOffset = 0; - - for (int y = 0; y < height; y++) { - int uvRowStart = uvIndex; - int uvRowOffset = y >> 1; - - for (int x = 0; x < width; x++) { - int yIndex = yOffset + x; - int uvIndexOffset = uvRowStart + (x >> 1); - - int yValue = yData[yIndex] & 0xFF; - int uValue = uData[uvIndexOffset] & 0xFF; - int vValue = vData[uvIndexOffset] & 0xFF; - - int r = yValue + (int) (1.370705f * (vValue - 128)); - int g = yValue - (int) (0.698001f * (vValue - 128)) - (int) (0.337633f * (uValue - 128)); - int b = yValue + (int) (1.732446f * (uValue - 128)); - - r = Math.max(0, Math.min(255, r)); - g = Math.max(0, Math.min(255, g)); - b = Math.max(0, Math.min(255, b)); - - int pixelColor = 0xFF000000 | (r << 16) | (g << 8) | b; - rgbData[y * width + x] = pixelColor; - } - - if (y % 2 == 1) { - uvIndex = uvRowStart + width / 2; - yOffset += width; - } - } - - return rgbData; - } - - private void processSegmentation(Bitmap bitmap) { - // Tạo InputImage từ bitmap - InputImage inputImage = InputImage.fromBitmap(bitmap, 0); - - // Xử lý phân đoạn - segmenter.process(inputImage) - .addOnSuccessListener(new OnSuccessListener() { - @Override - public void onSuccess(@NonNull SegmentationMask segmentationMask) { - // Xử lý khi phân đoạn thành công - ByteBuffer mask = segmentationMask.getBuffer(); - int maskWidth = segmentationMask.getWidth(); - int maskHeight = segmentationMask.getHeight(); - mask.rewind(); - - // Chuyển đổi buffer thành mảng màu - int[] colors = maskColorsFromByteBuffer(mask, maskWidth, maskHeight); - - // Tạo bitmap đã được phân đoạn từ mảng màu - Bitmap segmentedBitmap = createBitmapFromColors(colors, maskWidth, maskHeight); - - // Vẽ ảnh nền đã phân đoạn lên canvas - Bitmap outputBitmap = drawSegmentedBackground(segmentedBitmap, segmentedBitmap); - - // Tạo VideoFrame mới từ bitmap đã xử lý - int frameRotation = 180; // Frame rotation angle (customize as needed) - long frameTimestamp = System.nanoTime(); // Frame timestamp (customize as needed) - VideoFrame outputVideoFrame = createVideoFrame(outputBitmap, frameRotation, frameTimestamp); - - // Gửi frame đã được cập nhật trở lại WebRTC - vbVideoSink.onFrame(outputVideoFrame); - } - }) - .addOnFailureListener(new OnFailureListener() { - @Override - public void onFailure(@NonNull Exception exception) { - // Xử lý khi phân đoạn thất bại - Log.e(TAG, "Segmentation failed: " + exception.getMessage()); - } - }); - } - - private Bitmap drawSegmentedBackground(Bitmap segmentedBitmap, Bitmap backgroundBitmap) { - Bitmap outputBitmap = Bitmap.createBitmap( - segmentedBitmap.getWidth(), segmentedBitmap.getHeight(), Bitmap.Config.ARGB_8888 - ); - Canvas canvas = new Canvas(outputBitmap); - - // Vẽ ảnh nền đã phân đoạn lên canvas - canvas.drawBitmap(backgroundBitmap, 0, 0, null); - 
canvas.drawBitmap(segmentedBitmap, 0, 0, null); - - return outputBitmap; - } - - private VideoFrame createVideoFrame(Bitmap bitmap, int rotation, long timestampNs) { - ByteBuffer buffer = ByteBuffer.allocate(bitmap.getByteCount()); - bitmap.copyPixelsToBuffer(buffer); - byte[] data = buffer.array(); - - int width = bitmap.getWidth(); - int height = bitmap.getHeight(); - int strideY = width; - int strideU = (width + 1) / 2; - int strideV = (width + 1) / 2; - - byte[] dataU = new byte[width * height / 4]; - byte[] dataV = new byte[width * height / 4]; - for (int i = 0; i < width * height / 4; i++) { - dataU[i] = data[width * height + i]; - dataV[i] = data[width * height + width * height / 4 + i]; - } - - Runnable releaseCallback = () -> { - // Thực hiện các thao tác giải phóng tài nguyên liên quan tại đây (nếu có) - }; - - VideoFrame.I420Buffer i420Buffer = JavaI420Buffer.wrap( - width, - height, - ByteBuffer.wrap(data), - strideY, - ByteBuffer.wrap(dataU), - strideU, ByteBuffer.wrap(dataV), strideV, releaseCallback - ); - - return new VideoFrame(i420Buffer, rotation, timestampNs); - } - - - // Hàm chuyển đổi buffer thành mảng màu - private int[] maskColorsFromByteBuffer(ByteBuffer buffer, int width, int height) { - // Chuyển đổi từ ByteBuffer thành mảng màu, tùy thuộc vào định dạng màu - // của buffer. Đảm bảo bạn sử dụng đúng định dạng màu tương ứng với - // phân đoạn của ML Kit. - // Trong ví dụ này, chúng tôi giả định rằng buffer có định dạng ARGB_8888. - - // Ví dụ: chuyển đổi từ ByteBuffer thành mảng ARGB_8888 - int[] colors = new int[width * height]; - buffer.asIntBuffer().get(colors); - - return colors; - } - - // Hàm tạo bitmap từ mảng màu - private Bitmap createBitmapFromColors(int[] colors, int width, int height) { - return Bitmap.createBitmap(colors, width, height, Bitmap.Config.ARGB_8888); - } - void removeVideoCapturerSync(String id) { synchronized (mVideoCapturers) { + // Dispose Virtual Background + flutterRTCVirtualBackground.dispose(); + VideoCapturerInfo info = mVideoCapturers.get(id); if (info != null) { try { diff --git a/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java b/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java index e4270a40..e07774b5 100644 --- a/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java +++ b/android/src/main/java/com/cloudwebrtc/webrtc/MethodCallHandlerImpl.java @@ -5,11 +5,12 @@ import android.app.Activity; import android.content.Context; import android.content.pm.PackageManager; +import android.graphics.Bitmap; +import android.graphics.BitmapFactory; import android.graphics.SurfaceTexture; import android.hardware.Camera; import android.hardware.Camera.CameraInfo; import android.media.AudioDeviceInfo; -import android.media.AudioManager; import android.os.Build; import android.util.Log; import android.util.LongSparseArray; @@ -34,8 +35,6 @@ import org.webrtc.AudioTrack; import org.webrtc.CryptoOptions; -import org.webrtc.DefaultVideoEncoderFactory; -import org.webrtc.DefaultVideoDecoderFactory; import org.webrtc.DtmfSender; import org.webrtc.EglBase; import org.webrtc.IceCandidate; @@ -70,7 +69,6 @@ import org.webrtc.audio.JavaAudioDeviceModule; import java.io.File; -import java.io.UnsupportedEncodingException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -110,6 +108,8 @@ public class MethodCallHandlerImpl implements MethodCallHandler, StateProvider { */ private GetUserMediaImpl getUserMediaImpl; + private 
FlutterRTCVirtualBackground flutterRTCVirtualBackground; + private AudioDeviceModule audioDeviceModule; private FlutterRTCFrameCryptor frameCryptor; @@ -158,7 +158,8 @@ private void initialize(int networkIgnoreMask) { // Initialize EGL contexts required for HW acceleration. EglBase.Context eglContext = EglUtils.getRootEglBaseContext(); - getUserMediaImpl = new GetUserMediaImpl(this, context); + flutterRTCVirtualBackground = new FlutterRTCVirtualBackground(); + getUserMediaImpl = new GetUserMediaImpl(this, context, flutterRTCVirtualBackground); frameCryptor = new FlutterRTCFrameCryptor(this); /* @@ -260,6 +261,22 @@ public void onMethodCall(MethodCall call, @NonNull Result notSafeResult) { getUserMedia(constraintsMap, result); break; } + case "enableVirtualBackground":{ + byte[] image = call.argument("imageBytes"); + double confidence = call.argument("confidence"); + Bitmap bgImage = null; + if (image != null) { + bgImage = BitmapFactory.decodeByteArray(image, 0, image.length); + } + flutterRTCVirtualBackground.configurationVirtualBackground(bgImage, confidence); + result.success(true); + break; + } + case "disableVirtualBackground": { + flutterRTCVirtualBackground.setBackgroundIsNull(); + result.success(true); + break; + } case "createLocalMediaStream": createLocalMediaStream(result); break; diff --git a/lib/src/helper.dart b/lib/src/helper.dart index 84149b2e..352d9476 100644 --- a/lib/src/helper.dart +++ b/lib/src/helper.dart @@ -134,4 +134,19 @@ class Helper { AppleNativeAudioManagement.setAppleAudioConfiguration( AppleNativeAudioManagement.getAppleAudioConfigurationForMode(mode, preferSpeakerOutput: preferSpeakerOutput)); + + // Virtual Background + static Future enableVirtualBackground({ + required Uint8List backgroundImage, + double thresholdConfidence = 0.7, + }) async { + await WebRTC.invokeMethod("enableVirtualBackground", { + "imageBytes": backgroundImage, + "confidence": thresholdConfidence, + }); + } + + static Future disableVirtualBackground() async { + await WebRTC.invokeMethod("disableVirtualBackground"); + } }
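
Usage note (illustrative, not part of the patch series): a minimal sketch of how an application might call the Dart helpers added to lib/src/helper.dart by PATCH 9/9. The asset path 'assets/background.jpg', the rootBundle loading, and the function name toggleVirtualBackground are assumptions for the example, not part of this changeset; any source of image bytes works, and only the Android side implements these method-channel calls in this series.

    import 'package:flutter/services.dart' show rootBundle;
    import 'package:flutter_webrtc/flutter_webrtc.dart';

    /// Hypothetical example: enable or disable the virtual background
    /// on the local camera track via the new Helper API.
    Future<void> toggleVirtualBackground({required bool enable}) async {
      if (enable) {
        // Load the background image as raw bytes (asset path is assumed here).
        final bytes =
            (await rootBundle.load('assets/background.jpg')).buffer.asUint8List();
        await Helper.enableVirtualBackground(
          backgroundImage: bytes,
          // Per FlutterRTCVirtualBackground.kt, pixels whose foreground confidence
          // is >= this value keep the camera image; the rest show the background.
          thresholdConfidence: 0.7,
        );
      } else {
        await Helper.disableVirtualBackground();
      }
    }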