diff --git a/app/src/main/java/com/android/ai/catalog/domain/SampleCatalog.kt b/app/src/main/java/com/android/ai/catalog/domain/SampleCatalog.kt
index b16cafff..1fadf927 100644
--- a/app/src/main/java/com/android/ai/catalog/domain/SampleCatalog.kt
+++ b/app/src/main/java/com/android/ai/catalog/domain/SampleCatalog.kt
@@ -127,7 +127,7 @@ val sampleCatalog = listOf(
description = R.string.magic_selfie_sample_list_description,
route = "MagicSelfieScreen",
sampleEntryScreen = { MagicSelfieScreen() },
- tags = listOf(SampleTags.IMAGEN, SampleTags.FIREBASE, SampleTags.ML_KIT),
+ tags = listOf(SampleTags.GEMINI_FLASH, SampleTags.FIREBASE),
needsFirebase = true,
keyArt = R.drawable.img_keyart_magic_selfie,
),
diff --git a/samples/magic-selfie/README.md b/samples/magic-selfie/README.md
index 37c8270b..19e449e6 100644
--- a/samples/magic-selfie/README.md
+++ b/samples/magic-selfie/README.md
@@ -12,24 +12,19 @@ This sample demonstrates how to create a "magic selfie" by replacing the backgro
## How it works
-The application uses two main components. First, the ML Kit Subject Segmentation API processes the user's selfie to create a bitmap containing only the foreground (the person). Second, the Firebase AI SDK (see [How to run](../../#how-to-run)) for Android interacts with the Imagen model to generate a new background image from a user-provided text prompt. Finally, the application combines the foreground bitmap with the newly generated background to create the final magic selfie. The core logic for this process is in the [`MagicSelfieViewModel.kt`](./src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt) and [`MagicSelfieRepository.kt`](./src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt) files.
+The application uses the Firebase AI SDK (see [How to run](../../#how-to-run)) for Android to interact with the Nano Banana 2 model. Unlike older approaches that require manual subject segmentation and image compositing, Nano Banana 2 can process a multimodal prompt (an image plus text) to modify the scene directly. The application sends the user's selfie and a prompt describing the desired background, and the model generates a new version of the image with the background replaced. The core logic for this process is in the [`MagicSelfieViewModel.kt`](./src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt) and [`MagicSelfieRepository.kt`](./src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt) files.
-Here is the key snippet of code that orchestrates the magic selfie creation from [`MagicSelfieViewModel.kt`](./src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt):
+Here is the key snippet of code that calls the generative model from [`MagicSelfieRepository.kt`](./src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt):
```kotlin
-fun createMagicSelfie(bitmap: Bitmap, prompt: String) {
- viewModelScope.launch {
- try {
- _uiState.value = MagicSelfieUiState.RemovingBackground
- val foregroundBitmap = magicSelfieRepository.generateForegroundBitmap(bitmap)
- _uiState.value = MagicSelfieUiState.GeneratingBackground
- val backgroundBitmap = magicSelfieRepository.generateBackground(prompt)
- val resultBitmap = magicSelfieRepository.combineBitmaps(foregroundBitmap, backgroundBitmap)
- _uiState.value = MagicSelfieUiState.Success(resultBitmap)
- } catch (e: Exception) {
- _uiState.value = MagicSelfieUiState.Error(e.message)
- }
+suspend fun generateMagicSelfie(bitmap: Bitmap, prompt: String): Bitmap {
+ val multimodalPrompt = content {
+ image(bitmap)
+ text("Change the background of this image to $prompt")
}
+ val response = generativeModel.generateContent(multimodalPrompt)
+ return response.candidates.firstOrNull()?.content?.parts?.firstNotNullOfOrNull { it.asImageOrNull() }
+ ?: throw Exception("No image generated")
}
```
diff --git a/samples/magic-selfie/build.gradle.kts b/samples/magic-selfie/build.gradle.kts
index 0b5cd2d6..9d59936c 100644
--- a/samples/magic-selfie/build.gradle.kts
+++ b/samples/magic-selfie/build.gradle.kts
@@ -69,7 +69,6 @@ dependencies {
implementation(libs.hilt.android)
implementation(libs.hilt.navigation.compose)
implementation(libs.androidx.runtime.livedata)
- implementation(libs.mlkit.segmentation)
implementation(libs.ui.tooling.preview)
debugImplementation(libs.ui.tooling)
diff --git a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt
index 8130ac4f..19952fef 100644
--- a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt
+++ b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/data/MagicSelfieRepository.kt
@@ -16,87 +16,34 @@
package com.android.ai.samples.magicselfie.data
import android.graphics.Bitmap
-import android.graphics.Canvas
-import android.graphics.Paint
import com.google.firebase.Firebase
import com.google.firebase.ai.ai
import com.google.firebase.ai.type.GenerativeBackend
-import com.google.firebase.ai.type.ImagenAspectRatio
-import com.google.firebase.ai.type.ImagenGenerationConfig
-import com.google.firebase.ai.type.ImagenImageFormat
-import com.google.firebase.ai.type.PublicPreviewAPI
-import com.google.mlkit.vision.common.InputImage
-import com.google.mlkit.vision.segmentation.subject.SubjectSegmentation
-import com.google.mlkit.vision.segmentation.subject.SubjectSegmenterOptions
+import com.google.firebase.ai.type.ResponseModality
+import com.google.firebase.ai.type.asImageOrNull
+import com.google.firebase.ai.type.content
+import com.google.firebase.ai.type.generationConfig
import javax.inject.Inject
import javax.inject.Singleton
-import kotlin.coroutines.suspendCoroutine
-import kotlin.math.roundToInt
@Singleton
class MagicSelfieRepository @Inject constructor() {
- @OptIn(PublicPreviewAPI::class)
- private val imagenModel = Firebase.ai(backend = GenerativeBackend.vertexAI()).imagenModel(
- modelName = "imagen-4.0-generate-001",
- generationConfig = ImagenGenerationConfig(
- numberOfImages = 1,
- aspectRatio = ImagenAspectRatio.PORTRAIT_3x4,
- imageFormat = ImagenImageFormat.jpeg(compressionQuality = 75),
- ),
- )
-
- private val subjectSegmenter = SubjectSegmentation.getClient(
- SubjectSegmenterOptions.Builder()
- .enableForegroundBitmap()
- .build(),
- )
-
- suspend fun generateForegroundBitmap(bitmap: Bitmap): Bitmap {
- val image = InputImage.fromBitmap(bitmap, 0)
- return suspendCoroutine { continuation ->
- subjectSegmenter.process(image)
- .addOnSuccessListener {
- it.foregroundBitmap?.let { foregroundBitmap ->
- continuation.resumeWith(Result.success(foregroundBitmap))
- }
- }
- .addOnFailureListener {
- continuation.resumeWith(Result.failure(it))
- }
- }
- }
-
- @OptIn(PublicPreviewAPI::class)
- suspend fun generateBackground(prompt: String): Bitmap {
- val imageResponse = imagenModel.generateImages(
- prompt = prompt,
+ private val generativeModel by lazy {
+ Firebase.ai(backend = GenerativeBackend.googleAI()).generativeModel(
+ modelName = "gemini-3.1-flash-image-preview",
+ generationConfig = generationConfig {
+ responseModalities = listOf(ResponseModality.TEXT, ResponseModality.IMAGE)
+ }
)
- val image = imageResponse.images.first()
- return image.asBitmap()
}
- fun combineBitmaps(foreground: Bitmap, background: Bitmap): Bitmap {
- val height = background.height
- val width = background.width
-
- val resultBitmap = Bitmap.createBitmap(width, height, background.config!!)
- val canvas = Canvas(resultBitmap)
- val paint = Paint()
- canvas.drawBitmap(background, 0f, 0f, paint)
-
- var foregroundHeight = foreground.height
- var foregroundWidth = foreground.width
- val ratio = foregroundWidth.toFloat() / foregroundHeight.toFloat()
-
- foregroundHeight = height
- foregroundWidth = (foregroundHeight * ratio).roundToInt()
-
- val scaledForeground = Bitmap.createScaledBitmap(foreground, foregroundWidth, foregroundHeight, false)
-
- val left = (width - scaledForeground.width) / 2f
- val top = (height - scaledForeground.height.toFloat())
- canvas.drawBitmap(scaledForeground, left, top, paint)
-
- return resultBitmap
+ suspend fun generateMagicSelfie(bitmap: Bitmap, prompt: String): Bitmap {
+ val multimodalPrompt = content {
+ image(bitmap)
+ text("Change the background of this image to $prompt")
+ }
+ val response = generativeModel.generateContent(multimodalPrompt)
+ return response.candidates.firstOrNull()?.content?.parts?.firstNotNullOfOrNull { it.asImageOrNull() }
+ ?: throw Exception("No image generated")
}
}
diff --git a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieScreen.kt b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieScreen.kt
index bd08381f..148247be 100644
--- a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieScreen.kt
+++ b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieScreen.kt
@@ -235,7 +235,6 @@ private fun MagicSelfieScreen(
text = "",
icon = painterResource(id = com.android.ai.uicomponent.R.drawable.ic_ai_bg),
enabled = textFieldState.text.isNotEmpty() &&
- (uiState !is MagicSelfieUiState.RemovingBackground) &&
(uiState !is MagicSelfieUiState.GeneratingBackground),
) {
onGenerateClick(selfieBitmap, textFieldState.text.toString())
@@ -246,8 +245,7 @@ private fun MagicSelfieScreen(
SecondaryButton(
text = "",
icon = painterResource(id = com.android.ai.uicomponent.R.drawable.ic_ai_img),
- enabled = (uiState !is MagicSelfieUiState.RemovingBackground) &&
- (uiState !is MagicSelfieUiState.GeneratingBackground),
+ enabled = (uiState !is MagicSelfieUiState.GeneratingBackground),
onClick = onTakePictureClick,
)
},
diff --git a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieUiState.kt b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieUiState.kt
index 2ada49bf..ff30be89 100644
--- a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieUiState.kt
+++ b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieUiState.kt
@@ -19,7 +19,6 @@ import android.graphics.Bitmap
sealed interface MagicSelfieUiState {
data object Initial : MagicSelfieUiState
- data object RemovingBackground : MagicSelfieUiState
data object GeneratingBackground : MagicSelfieUiState
data class Success(val bitmap: Bitmap) : MagicSelfieUiState
data class Error(val message: String?) : MagicSelfieUiState
diff --git a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt
index 30f68adf..64eac062 100644
--- a/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt
+++ b/samples/magic-selfie/src/main/java/com/android/ai/samples/magicselfie/ui/MagicSelfieViewModel.kt
@@ -34,11 +34,8 @@ class MagicSelfieViewModel @Inject constructor(private val magicSelfieRepository
fun createMagicSelfie(bitmap: Bitmap, prompt: String) {
viewModelScope.launch {
try {
- _uiState.value = MagicSelfieUiState.RemovingBackground
- val foregroundBitmap = magicSelfieRepository.generateForegroundBitmap(bitmap)
_uiState.value = MagicSelfieUiState.GeneratingBackground
- val backgroundBitmap = magicSelfieRepository.generateBackground(prompt)
- val resultBitmap = magicSelfieRepository.combineBitmaps(foregroundBitmap, backgroundBitmap)
+ val resultBitmap = magicSelfieRepository.generateMagicSelfie(bitmap, prompt)
_uiState.value = MagicSelfieUiState.Success(resultBitmap)
} catch (e: Exception) {
_uiState.value = MagicSelfieUiState.Error(e.message)
diff --git a/samples/magic-selfie/src/main/res/values/strings.xml b/samples/magic-selfie/src/main/res/values/strings.xml
index 9a802fd5..7524495a 100644
--- a/samples/magic-selfie/src/main/res/values/strings.xml
+++ b/samples/magic-selfie/src/main/res/values/strings.xml
@@ -1,7 +1,7 @@