- Better text formatting on the home screen for smaller screens.

- Set the default backend based on the order of the "accelerators" field in the model allowlist (see the sketch below).
Author: Jing Jin, 2025-05-19 11:43:52 -07:00
Parent: 4d0c570d34
Commit: e89fd9a23b
6 changed files with 7 additions and 8 deletions
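
The second bullet is the behavioral change: instead of preferring the GPU whenever a model lists it, the default accelerator now simply follows the order of the model's "accelerators" list. Below is a minimal Kotlin sketch of that selection rule; the AllowlistModel type and its fields are illustrative assumptions, not the Gallery's actual allowlist schema (only the label property and the ordering behavior come from the diff).

```kotlin
// Illustrative stand-in for one allowlist entry; the real schema is not shown here.
enum class Accelerator(val label: String) { CPU("CPU"), GPU("GPU") }

data class AllowlistModel(
  val name: String,
  val accelerators: List<Accelerator>, // order in the allowlist decides the default
)

fun defaultAccelerator(model: AllowlistModel): Accelerator {
  // Before this commit: GPU was chosen whenever it appeared in the list.
  // After this commit: the first listed accelerator wins.
  return model.accelerators.first()
}

fun main() {
  val model = AllowlistModel("example-model", listOf(Accelerator.CPU, Accelerator.GPU))
  println(defaultAccelerator(model).label) // prints "CPU": first in the allowlist order
}
```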


@@ -30,7 +30,7 @@ android {
minSdk = 26
targetSdk = 35
versionCode = 1
versionName = "0.9.2"
versionName = "0.9.3"
// Needed for HuggingFace auth workflows.
manifestPlaceholders["appAuthRedirectScheme"] = "com.google.aiedge.gallery.oauth"


@@ -353,7 +353,7 @@ private fun TaskList(
val linkColor = MaterialTheme.customColors.linkColor
val introText = buildAnnotatedString {
append("Welcome to Google AI Edge Gallery! Explore a world of \namazing on-device models from ")
append("Welcome to Google AI Edge Gallery! Explore a world of amazing on-device models from ")
withLink(
link = LinkAnnotation.Url(
url = "https://huggingface.co/litert-community", // Replace with the actual URL


@@ -67,7 +67,7 @@ fun createLlmChatConfigs(
),
SegmentedButtonConfig(
key = ConfigKey.ACCELERATOR,
-defaultValue = if (accelerators.contains(Accelerator.GPU)) Accelerator.GPU.label else accelerators[0].label,
+defaultValue = accelerators[0].label,
options = accelerators.map { it.label }
)
)


@@ -132,12 +132,8 @@ object LlmChatModelHelper {
input: String,
resultListener: ResultListener,
cleanUpListener: CleanUpListener,
-singleTurn: Boolean = false,
image: Bitmap? = null,
) {
-if (singleTurn) {
-resetSession(model = model)
-}
val instance = model.instance as LlmModelInstance
// Set listener.


@@ -61,6 +61,7 @@ open class LlmChatViewModel(curTask: Task = TASK_LLM_CHAT) : ChatViewModel(task
while (model.instance == null) {
delay(100)
}
+delay(500)
// Run inference.
val instance = model.instance as LlmModelInstance


@@ -78,6 +78,9 @@ open class LlmSingleTurnViewModel(val task: Task = TASK_LLM_PROMPT_LAB) : ViewMo
delay(100)
}
+LlmChatModelHelper.resetSession(model = model)
+delay(500)
// Run inference.
val instance = model.instance as LlmModelInstance
val prefillTokens = instance.session.sizeInTokens(input)
@@ -145,7 +148,6 @@ open class LlmSingleTurnViewModel(val task: Task = TASK_LLM_PROMPT_LAB) : ViewMo
setInProgress(false)
}
},
-singleTurn = true,
cleanUpListener = {
setPreparing(false)
setInProgress(false)
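
Putting the last three hunks together: runInference no longer takes a singleTurn flag, so single-turn callers now reset the session themselves and wait briefly before prefilling. A sketch of the resulting call pattern; Model, LlmChatModelHelper, resetSession, and runInference mirror names in the diff, but the listener signatures and the surrounding scaffolding are assumptions.

```kotlin
import kotlinx.coroutines.delay

// Model and LlmChatModelHelper come from the Gallery codebase; the listener
// shapes below are assumed, not taken from this diff.
suspend fun generateSingleTurn(
  model: Model,
  input: String,
  onPartialResult: (partial: String, done: Boolean) -> Unit,
) {
  // Wait for model initialization to finish before touching the instance.
  while (model.instance == null) {
    delay(100)
  }

  // New pattern: reset the session explicitly (previously triggered inside
  // runInference by singleTurn = true), then give the runtime a short pause.
  LlmChatModelHelper.resetSession(model = model)
  delay(500)

  LlmChatModelHelper.runInference(
    model = model,
    input = input,
    resultListener = onPartialResult,
    cleanUpListener = {
      // e.g. clear the "preparing" / "in progress" UI state, as the view models do.
    },
  )
}
```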