5 Commits

16 changed files with 834 additions and 38 deletions

View File

@@ -4,7 +4,7 @@
<selectionStates> <selectionStates>
<SelectionState runConfigName="app"> <SelectionState runConfigName="app">
<option name="selectionMode" value="DROPDOWN" /> <option name="selectionMode" value="DROPDOWN" />
<DropdownSelection timestamp="2026-02-25T23:24:39.552459762Z"> <DropdownSelection timestamp="2026-02-28T04:08:30.769945596Z">
<Target type="DEFAULT_BOOT"> <Target type="DEFAULT_BOOT">
<handle> <handle>
<DeviceId pluginId="LocalEmulator" identifier="path=/home/michael/.android/avd/Pixel_8_API_35.avd" /> <DeviceId pluginId="LocalEmulator" identifier="path=/home/michael/.android/avd/Pixel_8_API_35.avd" />

View File

@@ -3,7 +3,7 @@ plugins {
alias(libs.plugins.kotlin.android) alias(libs.plugins.kotlin.android)
alias(libs.plugins.kotlin.compose) alias(libs.plugins.kotlin.compose)
id("com.chaquo.python") // Apply it here id("com.chaquo.python") // Apply it here
id("kotlin-kapt") // Added for the Room Android database subsystem and libraries id("com.google.devtools.ksp") // Added for the Room Android database subsystem and libraries
} }
chaquopy { chaquopy {
@@ -84,5 +84,14 @@ dependencies {
// Room Database for local chat history // Room Database for local chat history
implementation("androidx.room:room-runtime:2.6.1") implementation("androidx.room:room-runtime:2.6.1")
implementation("androidx.room:room-ktx:2.6.1") implementation("androidx.room:room-ktx:2.6.1")
kapt("androidx.room:room-compiler:2.6.1") ksp("androidx.room:room-compiler:2.6.1")
// Llama.cpp Kotlin Multiplatform Wrapper
implementation("com.llamatik:library:0.8.1")
// Google AI Edge - MediaPipe LLM Inference API
implementation("com.google.mediapipe:tasks-genai:0.10.27")
// Extended Material Icons (for Download, CheckCircle, etc.)
implementation("androidx.compose.material:material-icons-extended")
} }

View File

@@ -4,6 +4,12 @@
<uses-permission android:name="android.permission.INTERNET" /> <uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.BIND_ACCESSIBILITY_SERVICE" />
<uses-permission android:name="android.permission.READ_SMS"/>
<uses-permission android:name="android.permission.RECEIVE_SMS"/> <!-- for new messages -->
<application <application
android:name="AliceApp" android:name="AliceApp"
android:usesCleartextTraffic="true" android:usesCleartextTraffic="true"
@@ -26,6 +32,17 @@
<category android:name="android.intent.category.LAUNCHER" /> <category android:name="android.intent.category.LAUNCHER" />
</intent-filter> </intent-filter>
</activity> </activity>
<service
android:name=".AliceAccessibilityService"
android:permission="android.permission.BIND_ACCESSIBILITY_SERVICE"
android:exported="false">
<intent-filter>
<action android:name="android.accessibilityservice.AccessibilityService" />
</intent-filter>
<meta-data
android:name="android.accessibilityservice"
android:resource="@xml/accessibility_service_config" />
</service>
</application> </application>
</manifest> </manifest>

View File

@@ -0,0 +1,21 @@
package net.mmanningau.alice
import android.accessibilityservice.AccessibilityService
import android.view.accessibility.AccessibilityEvent
import android.util.Log
/**
 * Accessibility-service entry point for Alice.
 *
 * Currently a stub: it only logs connection / interruption events.
 * [onAccessibilityEvent] is intentionally empty — screen-reading logic
 * will be added there later.
 */
class AliceAccessibilityService : AccessibilityService() {

    override fun onServiceConnected() {
        super.onServiceConnected()
        Log.d(TAG, "Service Connected and Ready!")
    }

    override fun onAccessibilityEvent(event: AccessibilityEvent?) {
        // Screen-reading logic will be built here later.
    }

    override fun onInterrupt() {
        Log.e(TAG, "Service Interrupted")
    }

    private companion object {
        // Single logcat tag so all service events can be filtered together.
        const val TAG = "AliceAccessibility"
    }
}

View File

@@ -0,0 +1,115 @@
package net.mmanningau.alice
import com.llamatik.library.platform.LlamaBridge
import dev.langchain4j.agent.tool.ToolExecutionRequest
import dev.langchain4j.agent.tool.ToolSpecification
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage
import dev.langchain4j.data.message.ToolExecutionResultMessage
import dev.langchain4j.data.message.UserMessage
import dev.langchain4j.model.chat.ChatLanguageModel
import dev.langchain4j.model.output.Response
import org.json.JSONObject
import java.io.File
import java.util.UUID
/**
 * Adapts the Llamatik llama.cpp bridge to langchain4j's [ChatLanguageModel].
 *
 * Formats chat history with ChatML tags (<|im_start|>/<|im_end|>), injects a
 * tool-usage instruction block into the system prompt, and parses tool calls
 * out of the raw completion with tolerant regexes.
 *
 * @param modelPath absolute path to a .gguf model file on device storage.
 */
class LlamaCppAdapter(private val modelPath: String) : ChatLanguageModel {

    // Llamatik's bridge is a global singleton; we only load the model once.
    private var isEngineLoaded = false

    /**
     * Lazily loads the GGUF model on first use.
     * @throws IllegalStateException if the model file is missing on disk.
     */
    private fun getOrInitEngine() {
        if (!isEngineLoaded) {
            val modelFile = File(modelPath)
            if (!modelFile.exists()) {
                throw IllegalStateException("Model file not found at: $modelPath. Please download a model first.")
            }
            LlamaBridge.initGenerateModel(modelPath)
            isEngineLoaded = true
        }
    }

    override fun generate(messages: List<ChatMessage>): Response<AiMessage> {
        return generate(messages, emptyList())
    }

    // Advanced generator (with tools).
    override fun generate(
        messages: List<ChatMessage>,
        toolSpecifications: List<ToolSpecification>
    ): Response<AiMessage> {
        getOrInitEngine()
        val promptBuilder = StringBuilder()
        // 1. Build the fool-proof system prompt describing the available tools.
        val toolsPrompt = StringBuilder()
        if (toolSpecifications.isNotEmpty()) {
            toolsPrompt.append("\n\n# AVAILABLE TOOLS\n")
            for (tool in toolSpecifications) {
                toolsPrompt.append("- ${tool.name()}: ${tool.description()}\n")
            }
            toolsPrompt.append("\nCRITICAL INSTRUCTION: To use a tool, you MUST reply with a JSON object. Do NOT use Python parentheses.\n")
            toolsPrompt.append("Correct Example:\n{\"name\": \"get_battery_level\", \"arguments\": {}}\n")
        }
        // 2. Format the chat history using ChatML turn tags.
        for (message in messages) {
            when (message) {
                is SystemMessage -> {
                    // Tool instructions ride along inside the system turn.
                    val content = message.text() + toolsPrompt.toString()
                    promptBuilder.append("<|im_start|>system\n$content<|im_end|>\n")
                }
                is UserMessage -> promptBuilder.append("<|im_start|>user\n${message.text()}<|im_end|>\n")
                is ToolExecutionResultMessage -> {
                    // Tool results are re-fed as user turns so the model summarises them.
                    promptBuilder.append("<|im_start|>user\nTool [${message.toolName()}] returned: ${message.text()}<|im_end|>\n")
                }
                is AiMessage -> {
                    if (message.hasToolExecutionRequests()) {
                        // Replay the assistant's tool call exactly as JSON.
                        val request = message.toolExecutionRequests()[0]
                        promptBuilder.append("<|im_start|>assistant\n{\"name\": \"${request.name()}\", \"arguments\": ${request.arguments()}}<|im_end|>\n")
                    } else {
                        // Strip our own synthetic "Calling tool: ..." placeholders from history.
                        val cleanText = message.text()?.replace(Regex("Calling tool:.*?\\.\\.\\."), "")?.trim() ?: ""
                        if (cleanText.isNotBlank()) {
                            promptBuilder.append("<|im_start|>assistant\n$cleanText<|im_end|>\n")
                        }
                    }
                }
            }
        }
        promptBuilder.append("<|im_start|>assistant\n")
        // 3. Execution.
        val responseText = LlamaBridge.generate(promptBuilder.toString()).replace("<|im_end|>", "").trim()
        // 4. Parse the output (regex hunter).
        if (toolSpecifications.isNotEmpty()) {
            // Sanitize single quotes once, then reuse for both regex hunts.
            val sanitized = responseText.replace("'", "\"")
            val match = NAME_REGEX.find(sanitized)
            if (match != null) {
                val toolName = match.groupValues[1]
                // Try to extract arguments if they exist, otherwise default to empty JSON.
                var argumentsJson = "{}"
                val argMatch = ARG_REGEX.find(sanitized)
                if (argMatch != null) {
                    argumentsJson = argMatch.groupValues[1]
                }
                val request = ToolExecutionRequest.builder()
                    .id(UUID.randomUUID().toString())
                    .name(toolName)
                    .arguments(argumentsJson)
                    .build()
                return Response.from(AiMessage.from("Calling tool: $toolName...", listOf(request)))
            }
        }
        // 5. Standard text output.
        return Response.from(AiMessage(responseText))
    }

    private companion object {
        // Hunt for "name": "something" regardless of surrounding brackets.
        val NAME_REGEX = Regex("\"name\"\\s*:\\s*\"([^\"]+)\"")
        // FIX: DOT_MATCHES_ALL lets '.' cross newlines so pretty-printed,
        // multi-line argument objects are still captured (matches the fix
        // already applied in MediaPipeAdapter).
        val ARG_REGEX = Regex("\"arguments\"\\s*:\\s*(\\{.*?\\})", RegexOption.DOT_MATCHES_ALL)
    }
}

View File

@@ -1,5 +1,7 @@
package net.mmanningau.alice package net.mmanningau.alice
import android.content.Context
import android.util.Log
import dev.langchain4j.data.message.AiMessage import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage import dev.langchain4j.data.message.SystemMessage
@@ -11,12 +13,17 @@ import java.time.Duration
import java.text.SimpleDateFormat import java.text.SimpleDateFormat
import java.util.Date import java.util.Date
import java.util.Locale import java.util.Locale
import kotlinx.coroutines.flow.MutableStateFlow
import kotlinx.coroutines.flow.StateFlow
object LlmManager { object LlmManager {
private var chatModel: ChatLanguageModel? = null private var chatModel: ChatLanguageModel? = null
var currentMode: String = "Remote" var currentMode: String = "Remote"
private set private set
// Hardware telemetry for the UI
private val _hardwareBackend = MutableStateFlow("Standby")
val hardwareBackend: StateFlow<String> = _hardwareBackend
// Database tracking // Database tracking
private var chatDao: ChatDao? = null private var chatDao: ChatDao? = null
@@ -28,7 +35,7 @@ object LlmManager {
// Initialization now makes the dao optional so the UI can safely call it! // Initialization now makes the dao optional so the UI can safely call it!
fun initialize( fun initialize(
dao: ChatDao?, mode: String, url: String, modelName: String, apiKey: String, systemPrompt: String context: Context, dao: ChatDao?, mode: String, url: String, modelName: String, apiKey: String, systemPrompt: String
) { ) {
// Only update the DAO if one was passed in (like on app boot) // Only update the DAO if one was passed in (like on app boot)
if (dao != null) { if (dao != null) {
@@ -50,8 +57,37 @@ object LlmManager {
.logRequests(true) .logRequests(true)
.logResponses(true) .logResponses(true)
.build() .build()
} else if (mode == "Local") {
// Grab the absolute path from the registry
val fullPath = ModelRegistry.getModelPath(context, modelName)
// NEW: The Switchboard! Route the path to the correct engine based on file type
when {
fullPath.endsWith(".task") || fullPath.endsWith(".litertlm") -> {
Log.d("AliceEngine", "Routing to MediaPipe Engine (Formula 1 Mode)")
// Reset to standby when a new model is selected
_hardwareBackend.value = "Standby"
chatModel = MediaPipeAdapter(context, fullPath) { systemEvent ->
// Intercept the hardware broadcast
if (systemEvent.startsWith("HARDWARE_STATE:")) {
_hardwareBackend.value = systemEvent.removePrefix("HARDWARE_STATE:").trim()
} else { } else {
chatModel = null // MLC Engine goes here later! Log.w("AliceSystem", systemEvent)
}
}
}
fullPath.endsWith(".gguf") -> {
Log.d("AliceEngine", "Routing to Llama.cpp Engine (Flexible Mode)")
// Llama.cpp manages its own Vulkan backend, so we just label it
_hardwareBackend.value = "Vulkan"
chatModel = LlamaCppAdapter(fullPath)
}
else -> {
Log.e("AliceEngine", "Unsupported model file extension: $fullPath")
chatModel = null
}
}
} }
// Database Startup Logic // Database Startup Logic
@@ -102,47 +138,66 @@ object LlmManager {
} }
fun chat(userText: String): String { fun chat(userText: String): String {
if (currentMode == "MLC") return "System: MLC LLM On-Device engine is selected but not yet installed." if (currentMode == "Local" && chatModel == null) return "System: Local engine is selected but not properly initialized or unsupported file format."
val currentModel = chatModel ?: return "Error: LLM engine not initialized." val currentModel = chatModel ?: return "Error: LLM engine not initialized."
// If the history size is 1, it means only the System prompt exists. This is the first message!
if (chatHistory.size == 1) { if (chatHistory.size == 1) {
// Take the first 25 characters. If it's longer, add "..."
val previewLength = 25 val previewLength = 25
val newTitle = if (userText.length > previewLength) { val newTitle = if (userText.length > previewLength) userText.take(previewLength).trim() + "..." else userText
userText.take(previewLength).trim() + "..."
} else {
userText
}
// Update the database instantly
chatDao?.updateThreadTitle(currentThreadId, newTitle) chatDao?.updateThreadTitle(currentThreadId, newTitle)
} }
// 1. Save user message to DB and Memory
chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = userText, isUser = true)) chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = userText, isUser = true))
chatHistory.add(UserMessage(userText)) chatHistory.add(UserMessage(userText))
val toolSpecs = SkillManager.loadSkills() val toolSpecs = SkillManager.loadSkills()
// --- LOOP CONTROL CONSTANTS ---
val MAX_TOOL_ITERATIONS = 5
var toolIterations = 0
val executedToolSignatures = mutableSetOf<String>() // Tracks name+args pairs to catch spin loops
var response = currentModel.generate(chatHistory, toolSpecs) var response = currentModel.generate(chatHistory, toolSpecs)
var aiMessage: AiMessage = response.content() var aiMessage: AiMessage = response.content()
chatHistory.add(aiMessage) chatHistory.add(aiMessage)
while (aiMessage.hasToolExecutionRequests()) { while (aiMessage.hasToolExecutionRequests()) {
// --- GUARD 1: Hard iteration cap ---
if (toolIterations >= MAX_TOOL_ITERATIONS) {
Log.w("AliceEngine", "Tool loop cap reached after $MAX_TOOL_ITERATIONS iterations. Breaking.")
val fallbackText = "I've reached the maximum number of steps trying to complete this task. Here's what I found so far."
chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = fallbackText, isUser = false))
return fallbackText
}
for (request in aiMessage.toolExecutionRequests()) { for (request in aiMessage.toolExecutionRequests()) {
val toolName = request.name() val toolName = request.name()
val arguments = request.arguments() val arguments = request.arguments()
// --- GUARD 2: Duplicate call detection ---
val signature = "$toolName::$arguments"
if (executedToolSignatures.contains(signature)) {
Log.w("AliceEngine", "Duplicate tool call detected for '$toolName'. Breaking loop.")
val fallbackText = "I seem to be going in circles with the '$toolName' tool. Let me stop and give you what I have."
chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = fallbackText, isUser = false))
return fallbackText
}
executedToolSignatures.add(signature)
val toolResult = SkillManager.executeSkill(toolName, arguments) val toolResult = SkillManager.executeSkill(toolName, arguments)
Log.d("AliceSkill", "TOOL_RESULT from [$toolName]: $toolResult")
chatHistory.add(ToolExecutionResultMessage(request.id(), toolName, toolResult)) chatHistory.add(ToolExecutionResultMessage(request.id(), toolName, toolResult))
} }
toolIterations++
response = currentModel.generate(chatHistory, toolSpecs) response = currentModel.generate(chatHistory, toolSpecs)
aiMessage = response.content() aiMessage = response.content()
chatHistory.add(aiMessage) chatHistory.add(aiMessage)
} }
// 2. Save final AI message to DB
chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = aiMessage.text(), isUser = false)) chatDao?.insertMessage(ChatMessageEntity(threadId = currentThreadId, text = aiMessage.text(), isUser = false))
return aiMessage.text() return aiMessage.text()
} }
} }

View File

@@ -17,6 +17,9 @@ import androidx.compose.material.icons.filled.Menu
import androidx.compose.material.icons.filled.Send import androidx.compose.material.icons.filled.Send
import androidx.compose.material.icons.filled.Add import androidx.compose.material.icons.filled.Add
import androidx.compose.material.icons.filled.List import androidx.compose.material.icons.filled.List
import androidx.compose.material.icons.filled.CheckCircle
import androidx.compose.material.icons.filled.Download
import androidx.compose.material3.LinearProgressIndicator
import androidx.compose.material3.* import androidx.compose.material3.*
import androidx.compose.runtime.* import androidx.compose.runtime.*
import androidx.compose.ui.Alignment import androidx.compose.ui.Alignment
@@ -47,6 +50,8 @@ class MainActivity : ComponentActivity() {
fun MainChatScreen() { fun MainChatScreen() {
val drawerState = rememberDrawerState(initialValue = DrawerValue.Closed) val drawerState = rememberDrawerState(initialValue = DrawerValue.Closed)
val scope = rememberCoroutineScope() val scope = rememberCoroutineScope()
// Observe the live hardware state
val hardwareBackend by LlmManager.hardwareBackend.collectAsState()
var currentScreen by remember { mutableStateOf("Chat") } var currentScreen by remember { mutableStateOf("Chat") }
var inputText by remember { mutableStateOf("") } var inputText by remember { mutableStateOf("") }
@@ -92,6 +97,19 @@ fun MainChatScreen() {
} }
) )
// --- NEW: Conditional Model Manager Button ---
if (LlmManager.currentMode == "Local") {
NavigationDrawerItem(
label = { Text("Model Manager") },
selected = currentScreen == "ModelManager",
icon = { Icon(Icons.Default.Add, contentDescription = "Download") }, // You can change this icon!
onClick = {
scope.launch { drawerState.close() }
currentScreen = "ModelManager"
}
)
}
Spacer(modifier = Modifier.height(16.dp)) Spacer(modifier = Modifier.height(16.dp))
HorizontalDivider() HorizontalDivider()
Text("Chat History", modifier = Modifier.padding(16.dp), style = MaterialTheme.typography.titleMedium, color = MaterialTheme.colorScheme.primary) Text("Chat History", modifier = Modifier.padding(16.dp), style = MaterialTheme.typography.titleMedium, color = MaterialTheme.colorScheme.primary)
@@ -119,7 +137,29 @@ fun MainChatScreen() {
Scaffold( Scaffold(
topBar = { topBar = {
TopAppBar( TopAppBar(
title = { Text("Alice Agent") }, title = {
Row(verticalAlignment = Alignment.CenterVertically) {
Text("Alice Agent")
// The Live Hardware Telemetry Badge
if (hardwareBackend != "Standby") {
Spacer(modifier = Modifier.width(8.dp))
Surface(
color = if (hardwareBackend == "GPU") MaterialTheme.colorScheme.primary
else MaterialTheme.colorScheme.error,
shape = RoundedCornerShape(4.dp)
) {
Text(
text = hardwareBackend,
style = MaterialTheme.typography.labelSmall,
color = if (hardwareBackend == "GPU") MaterialTheme.colorScheme.onPrimary
else MaterialTheme.colorScheme.onError,
modifier = Modifier.padding(horizontal = 6.dp, vertical = 2.dp)
)
}
}
}
},
navigationIcon = { navigationIcon = {
IconButton(onClick = { scope.launch { drawerState.open() } }) { IconButton(onClick = { scope.launch { drawerState.open() } }) {
Icon(Icons.Default.Menu, contentDescription = "Menu") Icon(Icons.Default.Menu, contentDescription = "Menu")
@@ -185,7 +225,7 @@ fun MainChatScreen() {
val response = LlmManager.chat(userText) val response = LlmManager.chat(userText)
messages = messages + ChatMessage(response, false) messages = messages + ChatMessage(response, false)
} catch (e: Exception) { } catch (e: Exception) {
messages = messages + ChatMessage("Connection Error: Is the local LLM server running?", false) messages = messages + ChatMessage("System Error: ${e.message}", false)
} }
} }
} }
@@ -203,6 +243,11 @@ fun MainChatScreen() {
onBackClicked = { currentScreen = "Chat" } onBackClicked = { currentScreen = "Chat" }
) )
} }
else if (currentScreen == "ModelManager") {
ModelManagerScreen(
onBackClicked = { currentScreen = "Chat" }
)
}
} }
} }
@@ -283,10 +328,10 @@ fun SettingsScreen(onBackClicked: () -> Unit) {
Text("Remote API") Text("Remote API")
Spacer(modifier = Modifier.width(16.dp)) Spacer(modifier = Modifier.width(16.dp))
RadioButton( RadioButton(
selected = llmMode == "MLC", selected = llmMode == "Local",
onClick = { llmMode = "MLC" } onClick = { llmMode = "Local" }
) )
Text("Local (MLC LLM)") Text("Local (Llama.cpp)")
} }
Spacer(modifier = Modifier.height(8.dp)) Spacer(modifier = Modifier.height(8.dp))
@@ -366,7 +411,7 @@ fun SettingsScreen(onBackClicked: () -> Unit) {
.putString("systemPrompt", systemPrompt) .putString("systemPrompt", systemPrompt)
.apply() .apply()
LlmManager.initialize(null, llmMode, llmUrl, modelName, apiKey, systemPrompt) LlmManager.initialize(context, null, llmMode, llmUrl, modelName, apiKey, systemPrompt)
SkillManager.updateDirectory(skillsPath) SkillManager.updateDirectory(skillsPath)
onBackClicked() onBackClicked()
@@ -379,3 +424,144 @@ fun SettingsScreen(onBackClicked: () -> Unit) {
} }
} }
} }
/**
 * Model Manager screen: lists the curated local models and lets the user
 * download one via [ModelDownloader] or activate an already-downloaded one.
 *
 * Activation persists the filename under the "modelName" preference and
 * hot-reloads [LlmManager] immediately. Download progress is tracked per
 * model id in a snapshot-state map so only the affected card recomposes.
 *
 * @param onBackClicked invoked when the user taps the back arrow.
 */
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun ModelManagerScreen(onBackClicked: () -> Unit) {
    val context = LocalContext.current
    val scope = rememberCoroutineScope()
    val prefs = context.getSharedPreferences("AlicePrefs", Context.MODE_PRIVATE)
    // Track which model the user currently has selected as their active brain
    var activeModelName by remember { mutableStateOf(prefs.getString("modelName", "") ?: "") }
    // Keep track of download progress percentages for each model ID
    val downloadProgress = remember { mutableStateMapOf<String, Int>() }
    Scaffold(
        topBar = {
            TopAppBar(
                title = { Text("Local Model Manager") },
                navigationIcon = {
                    IconButton(onClick = onBackClicked) {
                        Icon(Icons.Default.ArrowBack, contentDescription = "Back")
                    }
                },
                colors = TopAppBarDefaults.topAppBarColors(
                    containerColor = MaterialTheme.colorScheme.secondaryContainer,
                    titleContentColor = MaterialTheme.colorScheme.onSecondaryContainer
                )
            )
        }
    ) { paddingValues ->
        LazyColumn(
            modifier = Modifier
                .fillMaxSize()
                .padding(paddingValues)
                .padding(16.dp),
            verticalArrangement = Arrangement.spacedBy(16.dp)
        ) {
            // Static header describing the model family.
            item {
                Text(
                    "Qwen 2.5 Architecture",
                    style = MaterialTheme.typography.titleMedium,
                    color = MaterialTheme.colorScheme.primary
                )
                Text(
                    "These models will run entirely on your device's GPU. Larger models are smarter but consume more battery and generate text slower.",
                    style = MaterialTheme.typography.bodySmall,
                    color = MaterialTheme.colorScheme.onSurfaceVariant
                )
                Spacer(modifier = Modifier.height(8.dp))
            }
            // One card per curated model; state is derived per item on recomposition.
            items(ModelRegistry.curatedModels) { model ->
                val isDownloaded = ModelRegistry.isModelDownloaded(context, model.fileName)
                val currentProgress = downloadProgress[model.id] ?: 0
                val isActive = activeModelName == model.fileName
                Card(
                    modifier = Modifier.fillMaxWidth(),
                    shape = RoundedCornerShape(12.dp),
                    colors = CardDefaults.cardColors(containerColor = MaterialTheme.colorScheme.surfaceVariant)
                ) {
                    Column(modifier = Modifier.padding(16.dp)) {
                        // Name on the left, size badge on the right.
                        Row(
                            modifier = Modifier.fillMaxWidth(),
                            horizontalArrangement = Arrangement.SpaceBetween,
                            verticalAlignment = Alignment.CenterVertically
                        ) {
                            Text(model.name, style = MaterialTheme.typography.titleMedium)
                            Text("${model.sizeMb} MB", style = MaterialTheme.typography.labelMedium)
                        }
                        Spacer(modifier = Modifier.height(4.dp))
                        Text(model.description, style = MaterialTheme.typography.bodySmall)
                        Spacer(modifier = Modifier.height(16.dp))
                        // Three mutually exclusive card states:
                        // downloaded -> activate button; in-flight -> progress bar; else -> download button.
                        if (isDownloaded) {
                            Button(
                                onClick = {
                                    // Save the exact filename so LlmManager knows which one to boot up
                                    prefs.edit().putString("modelName", model.fileName).apply()
                                    activeModelName = model.fileName
                                    // Hot-reload the LlmManager instantly with the other saved settings.
                                    val mode = prefs.getString("llmMode", "Local") ?: "Local"
                                    val url = prefs.getString("llmUrl", "") ?: ""
                                    val apiKey = prefs.getString("apiKey", "") ?: ""
                                    val prompt = prefs.getString("systemPrompt", "You are a helpful AI assistant.") ?: "You are a helpful AI assistant."
                                    LlmManager.initialize(context, null, mode, url, model.fileName, apiKey, prompt)
                                },
                                modifier = Modifier.fillMaxWidth(),
                                colors = ButtonDefaults.buttonColors(
                                    containerColor = if (isActive) MaterialTheme.colorScheme.primary else MaterialTheme.colorScheme.secondary
                                )
                            ) {
                                if (isActive) {
                                    Icon(Icons.Default.CheckCircle, contentDescription = "Active")
                                    Spacer(modifier = Modifier.width(8.dp))
                                    Text("Active Model")
                                } else {
                                    Text("Set as Active")
                                }
                            }
                        } else if (currentProgress > 0 && currentProgress < 100) {
                            // Download in flight: show live percentage.
                            // NOTE(review): ModelDownloader can emit -1 on failure, which this
                            // condition drops back to the Download button — confirm that is intended.
                            Column(modifier = Modifier.fillMaxWidth()) {
                                Text("Downloading: $currentProgress%", style = MaterialTheme.typography.labelMedium)
                                Spacer(modifier = Modifier.height(4.dp))
                                LinearProgressIndicator(
                                    progress = { currentProgress / 100f },
                                    modifier = Modifier.fillMaxWidth()
                                )
                            }
                        } else {
                            Button(
                                onClick = {
                                    // Initialize progress so the card switches to the progress state.
                                    downloadProgress[model.id] = 1
                                    // Launch the background download
                                    scope.launch {
                                        // Grab the Hugging Face token from the API Key settings field
                                        val hfToken = prefs.getString("apiKey", "") ?: ""
                                        ModelDownloader.downloadModel(context, model.downloadUrl, model.fileName, hfToken)
                                            .collect { progress ->
                                                downloadProgress[model.id] = progress
                                            }
                                    }
                                },
                                modifier = Modifier.fillMaxWidth()
                            ) {
                                Icon(Icons.Default.Download, contentDescription = "Download")
                                Spacer(modifier = Modifier.width(8.dp))
                                Text("Download")
                            }
                        }
                    }
                }
            }
        }
    }
}

View File

@@ -0,0 +1,175 @@
package net.mmanningau.alice
import android.content.Context
import android.util.Log
import com.google.mediapipe.tasks.genai.llminference.LlmInference
import dev.langchain4j.agent.tool.ToolExecutionRequest
import dev.langchain4j.agent.tool.ToolSpecification
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage
import dev.langchain4j.data.message.ToolExecutionResultMessage
import dev.langchain4j.data.message.UserMessage
import dev.langchain4j.model.chat.ChatLanguageModel
import dev.langchain4j.model.output.Response
import org.json.JSONObject
import java.io.File
import java.util.UUID
/**
 * Bridges Google's MediaPipe LLM Inference API (.task / .litertlm bundles)
 * to langchain4j's [ChatLanguageModel].
 *
 * Prefers the GPU backend and falls back to CPU on failure. Hardware and
 * system notices are routed through [onSystemEvent]; backend changes use the
 * "HARDWARE_STATE:" prefix convention that LlmManager intercepts.
 *
 * Prompting uses Gemma-style turn tags (<start_of_turn>...<end_of_turn>);
 * the system prompt is merged into the first user turn because Gemma has no
 * dedicated system role.
 */
class MediaPipeAdapter(
    private val context: Context,
    private val modelPath: String,
    private val onSystemEvent: (String) -> Unit // Flexible routing for UI notifications & Accessibility
) : ChatLanguageModel {

    private var engine: LlmInference? = null

    /**
     * Lazily creates the inference engine on first use.
     * Tries GPU first; on any failure notifies the UI and retries on CPU.
     * @throws IllegalStateException if the model file is missing on disk.
     */
    private fun getOrInitEngine(): LlmInference {
        if (engine == null) {
            val modelFile = File(modelPath)
            if (!modelFile.exists()) {
                throw IllegalStateException("Task file not found: $modelPath. Please download it.")
            }
            try {
                // THE PUSH: Aggressively demand the Adreno GPU
                val gpuOptions = LlmInference.LlmInferenceOptions.builder()
                    .setModelPath(modelPath)
                    .setMaxTokens(1200)
                    .setPreferredBackend(LlmInference.Backend.GPU)
                    .build()
                engine = LlmInference.createFromOptions(context, gpuOptions)
                Log.d("AliceEngine", "Formula 1 Mode: GPU Initialized successfully.")
                // Broadcast the successful hardware lock!
                onSystemEvent("HARDWARE_STATE: GPU")
            } catch (e: Exception) {
                // THE FALLBACK: If GPU fails, notify the UI and drop to CPU
                Log.e("AliceEngine", "GPU Initialization failed: ${e.message}")
                onSystemEvent("Hardware Notice: GPU not supported for this model. Falling back to CPU. Generation will be slower and consume more battery.")
                val cpuOptions = LlmInference.LlmInferenceOptions.builder()
                    .setModelPath(modelPath)
                    .setMaxTokens(1200)
                    .setPreferredBackend(LlmInference.Backend.CPU)
                    .build()
                engine = LlmInference.createFromOptions(context, cpuOptions)
                // Broadcast the fallback
                onSystemEvent("HARDWARE_STATE: CPU")
            }
        }
        return engine!!
    }

    override fun generate(messages: List<ChatMessage>): Response<AiMessage> {
        return generate(messages, emptyList())
    }

    override fun generate(
        messages: List<ChatMessage>,
        toolSpecifications: List<ToolSpecification>
    ): Response<AiMessage> {
        val activeEngine = getOrInitEngine()
        val promptBuilder = StringBuilder()
        // 1. The strict negative-constraint tool schema.
        val toolsPrompt = StringBuilder()
        if (toolSpecifications.isNotEmpty()) {
            toolsPrompt.append("\n\n# AVAILABLE TOOLS\n")
            for (tool in toolSpecifications) {
                toolsPrompt.append("- ${tool.name()}: ${tool.description()} | Params: ${tool.parameters()?.toString() ?: "{}"}\n")
            }
            toolsPrompt.append("\nCRITICAL RULES:\n")
            toolsPrompt.append("1. NEVER guess or fabricate data (like battery levels, IP addresses, or network latency). You MUST use a tool to fetch real data.\n")
            toolsPrompt.append("2. Do NOT invent your own syntax. You must use ONLY the exact JSON format below.\n")
            toolsPrompt.append("3. To execute a tool, reply with ONLY this JSON object and absolutely no other text:\n")
            toolsPrompt.append("{\"name\": \"<tool_name>\", \"arguments\": {<args>}}\n")
        }
        // 2. Format chat history using Gemma 3 tags (merging system into user).
        var isFirstUserMessage = true
        for (message in messages) {
            when (message) {
                is SystemMessage -> {
                    // IGNORE: no 'system' tag is appended because Gemma 3 doesn't support it.
                    // The toolsPrompt built in step 1 is merged into the first user turn instead.
                }
                is UserMessage -> {
                    if (isFirstUserMessage) {
                        // Merge the draconian tools prompt and the user's first message into one block
                        promptBuilder.append("<start_of_turn>user\n${toolsPrompt.toString()}\n\n${message.text()}<end_of_turn>\n")
                        isFirstUserMessage = false
                    } else {
                        promptBuilder.append("<start_of_turn>user\n${message.text()}<end_of_turn>\n")
                    }
                }
                is ToolExecutionResultMessage -> {
                    // Tool output is re-fed as a user turn with summarisation instructions.
                    promptBuilder.append("<start_of_turn>user\n[TOOL RESULT: ${message.toolName()}]\n${message.text()}\n\nIMPORTANT: The above is raw data from a tool. Do NOT repeat it verbatim. You must now write a naturally worded and concise response to the user's original question using this data. Summarise it concisely as Alice their helpful AI assistant.<end_of_turn>\n")
                }
                is AiMessage -> {
                    if (message.hasToolExecutionRequests()) {
                        // Replay the assistant's tool call exactly as JSON.
                        val request = message.toolExecutionRequests()[0]
                        promptBuilder.append("<start_of_turn>model\n{\"name\": \"${request.name()}\", \"arguments\": ${request.arguments()}}<end_of_turn>\n")
                    } else {
                        // Strip our synthetic "Calling tool: ..." placeholders from history.
                        val cleanText = message.text()?.replace(Regex("Calling tool:.*?\\.\\.\\."), "")?.trim() ?: ""
                        if (cleanText.isNotBlank()) {
                            promptBuilder.append("<start_of_turn>model\n$cleanText<end_of_turn>\n")
                        }
                    }
                }
            }
        }
        promptBuilder.append("<start_of_turn>model\n")
        // 3. Execution on MediaPipe.
        val rawResponse = activeEngine.generateResponse(promptBuilder.toString())
        Log.d("AliceEngine", "RAW_RESPONSE_LENGTH: ${rawResponse.length}")
        Log.d("AliceEngine", "RAW_RESPONSE: $rawResponse")
        Log.d("AliceEngine", "Engine state after gen - messages count: ${messages.size}")
        val responseText = rawResponse.replace("<end_of_turn>", "").trim()
        // Strip the markdown code blocks if Gemma adds them.
        val cleanText = responseText.replace(CODE_FENCE_REGEX, "").replace("```", "").trim()
        // 4. The bulletproof regex JSON parser.
        if (toolSpecifications.isNotEmpty()) {
            // Hunt directly for the tool name, bypassing strict JSON validation
            val match = NAME_REGEX.find(cleanText)
            if (match != null) {
                val toolName = match.groupValues[1]
                // Safely attempt to grab arguments. If they are hallucinated garbage, default to {}
                var argumentsJson = "{}"
                val argMatch = ARG_REGEX.find(cleanText)
                if (argMatch != null) {
                    val foundArgs = argMatch.groupValues[1]
                    try {
                        // Test if the args are valid JSON
                        JSONObject(foundArgs)
                        argumentsJson = foundArgs
                    } catch (e: Exception) {
                        // It was garbage (like the infinite 7777s). Keep the "{}" default.
                        Log.w("AliceEngine", "Discarded malformed arguments: $foundArgs")
                    }
                }
                val request = ToolExecutionRequest.builder()
                    .id(UUID.randomUUID().toString())
                    .name(toolName)
                    .arguments(argumentsJson)
                    .build()
                return Response.from(AiMessage.from("Calling tool: $toolName...", listOf(request)))
            }
        }
        return Response.from(AiMessage(cleanText))
    }

    private companion object {
        // Hoisted so the patterns are compiled once, not on every generation.
        val NAME_REGEX = Regex("\"name\"\\s*:\\s*\"([^\"]+)\"")
        // DOT_MATCHES_ALL lets '.' cross newlines so multi-line argument objects match.
        val ARG_REGEX = Regex("\"arguments\"\\s*:\\s*(\\{.*?\\})", RegexOption.DOT_MATCHES_ALL)
        // Strips ``` / ```json fences Gemma sometimes wraps around its JSON.
        val CODE_FENCE_REGEX = Regex("```(?:json)?")
    }
}

View File

@@ -0,0 +1,75 @@
package net.mmanningau.alice
import android.app.DownloadManager
import android.content.Context
import android.net.Uri
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.delay
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.flow.flowOn
import java.io.File
object ModelDownloader {

    /**
     * Enqueues [url] with the system [DownloadManager] and streams progress.
     *
     * Emitted values: 1..99 while running, 100 on success, -1 if the download
     * failed or disappeared from the manager's queue (e.g. cancelled by the
     * user from the notification shade). The flow completes after a terminal
     * emission. Runs its polling loop on [Dispatchers.IO].
     *
     * @param fileName destination file name inside the app's models directory.
     * @param hfToken optional Hugging Face access token, sent as a Bearer
     *        Authorization header so gated model repositories can be fetched.
     */
    fun downloadModel(context: Context, url: String, fileName: String, hfToken: String = ""): Flow<Int> = flow {
        val downloadManager = context.getSystemService(Context.DOWNLOAD_SERVICE) as DownloadManager
        // Ensure the destination directory exists
        val modelsDir = ModelRegistry.getModelsDirectory(context)
        val request = DownloadManager.Request(Uri.parse(url))
            .setTitle(fileName)
            .setDescription("Downloading AI Model for Alice...")
            .setNotificationVisibility(DownloadManager.Request.VISIBILITY_VISIBLE_NOTIFY_COMPLETED)
            .setDestinationUri(Uri.fromFile(File(modelsDir, fileName)))
            .setAllowedOverMetered(true) // Allow cellular downloads
        // Inject the Hugging Face Authorization header so we bypass the gate
        if (hfToken.isNotBlank()) {
            request.addRequestHeader("Authorization", "Bearer $hfToken")
        }
        val downloadId = downloadManager.enqueue(request)
        var finishDownload = false
        // Poll the OS once per second for the latest percentage
        while (!finishDownload) {
            val query = DownloadManager.Query().setFilterById(downloadId)
            // FIX: use {} guarantees the Cursor is closed even if emit() throws
            // (e.g. the collector is cancelled mid-download).
            downloadManager.query(query).use { cursor ->
                if (!cursor.moveToFirst()) {
                    // FIX: the download no longer exists in the manager's queue —
                    // without this branch the loop would poll forever.
                    finishDownload = true
                    emit(-1) // Error state
                } else {
                    val statusIndex = cursor.getColumnIndex(DownloadManager.COLUMN_STATUS)
                    when (cursor.getInt(statusIndex)) {
                        DownloadManager.STATUS_SUCCESSFUL -> {
                            finishDownload = true
                            emit(100)
                        }
                        DownloadManager.STATUS_FAILED -> {
                            finishDownload = true
                            emit(-1) // Error state
                        }
                        DownloadManager.STATUS_RUNNING -> {
                            val downloadedIndex = cursor.getColumnIndex(DownloadManager.COLUMN_BYTES_DOWNLOADED_SO_FAR)
                            val totalIndex = cursor.getColumnIndex(DownloadManager.COLUMN_TOTAL_SIZE_BYTES)
                            val bytesDownloaded = cursor.getLong(downloadedIndex)
                            val bytesTotal = cursor.getLong(totalIndex)
                            // Total size is unknown until headers arrive; skip until then.
                            if (bytesTotal > 0) {
                                emit(((bytesDownloaded * 100L) / bytesTotal).toInt())
                            }
                        }
                        // PENDING / PAUSED: nothing to report yet, keep polling.
                    }
                }
            }
            if (!finishDownload) {
                delay(1000)
            }
        }
    }.flowOn(Dispatchers.IO)
}

View File

@@ -0,0 +1,92 @@
package net.mmanningau.alice
import android.content.Context
import java.io.File
/**
 * Metadata describing one downloadable on-device LLM.
 *
 * Instances are declared statically in [ModelRegistry.curatedModels].
 *
 * @property id          identifier used to refer to the model internally.
 * @property name        human-readable display name.
 * @property description short blurb describing the model's strengths (shown to the user).
 * @property fileName    file name the model is stored under in the app's Models directory.
 * @property downloadUrl direct download URL for the model file (Hugging Face resolve links).
 * @property sizeMb      approximate download size in megabytes.
 */
data class LocalModel(
    val id: String,
    val name: String,
    val description: String,
    val fileName: String,
    val downloadUrl: String,
    val sizeMb: Int
)
object ModelRegistry {

    /** Curated list of on-device models offered for download. */
    val curatedModels = listOf(
        LocalModel(
            id = "qwen-0.5b",
            name = "Qwen 2.5 (0.5B)",
            description = "Ultra-light and lightning fast. Best for quick tasks and basic tool triggering.",
            fileName = "qwen2.5-0.5b-instruct-q4_k_m.gguf",
            downloadUrl = "https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct-GGUF/resolve/main/qwen2.5-0.5b-instruct-q4_k_m.gguf",
            sizeMb = 398
        ),
        LocalModel(
            id = "qwen-1.5b",
            name = "Qwen 2.5 (1.5B)",
            description = "The perfect daily driver. Excellent balance of speed, intelligence, and battery efficiency.",
            fileName = "qwen2.5-1.5b-instruct-q4_k_m.gguf",
            downloadUrl = "https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct-GGUF/resolve/main/qwen2.5-1.5b-instruct-q4_k_m.gguf",
            sizeMb = 1120
        ),
        LocalModel(
            id = "qwen-coder-3b",
            name = "Qwen 2.5 Coder (3B)",
            description = "Specialized for programming. Fantastic for generating Python scripts and home lab configurations.",
            fileName = "qwen2.5-coder-3b-instruct-q4_k_m.gguf",
            downloadUrl = "https://huggingface.co/Qwen/Qwen2.5-Coder-3B-Instruct-GGUF/resolve/main/qwen2.5-coder-3b-instruct-q4_k_m.gguf",
            sizeMb = 2020
        ),
        LocalModel(
            id = "qwen-3b",
            name = "Qwen 2.5 (3B)",
            description = "The Heavyweight. The highest quality conversational responses your device can comfortably run.",
            fileName = "qwen2.5-3b-instruct-q4_k_m.gguf",
            downloadUrl = "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct-GGUF/resolve/main/qwen2.5-3b-instruct-q4_k_m.gguf",
            sizeMb = 2020
        ),
        LocalModel(
            id = "gemma3-1b",
            name = "Gemma 3 (1B)",
            description = "Google's highly optimized mobile intelligence. Best balance of speed and reasoning.",
            fileName = "gemma-3-1b-it.task",
            // NOTE(review): size and URL look provisional — confirm against the exact
            // HuggingFace .task artifact before release.
            downloadUrl = "https://huggingface.co/litert-community/Gemma3-1B-IT/resolve/main/gemma3-1b-it-int4.task",
            sizeMb = 555
        ),
        LocalModel(
            id = "gemma3n-e2b",
            name = "Gemma 3n (E2B)",
            description = "Elastic architecture. Activates fewer parameters for battery efficiency while maintaining high logic.",
            fileName = "gemma-3n-e2b-it.task",
            downloadUrl = "https://huggingface.co/google/gemma-3n-E2B-it-litert-lm/resolve/main/gemma-3n-E2B-it-int4.litertlm",
            sizeMb = 3600
        ),
        LocalModel(
            id = "Qwen2.5-1.5B-Instruct_seq128_q8_ekv1280",
            name = "Qwen2.5-1.5B",
            description = "A highly optimised and fine tuned model for agentic tasks and function calling.",
            fileName = "Qwen2.5-1.5B-Instruct_seq128_q8_ekv1280.task",
            downloadUrl = "https://huggingface.co/litert-community/Qwen2.5-1.5B-Instruct/resolve/main/Qwen2.5-1.5B-Instruct_seq128_q8_ekv1280.task",
            sizeMb = 1570
        )
    )

    /** Returns the app-private "Models" directory, creating it on first access. */
    fun getModelsDirectory(context: Context): File {
        val modelsDir = File(context.getExternalFilesDir(null), "Models")
        if (!modelsDir.exists()) {
            modelsDir.mkdirs()
        }
        return modelsDir
    }

    /** True when [fileName] is present in the models directory and non-empty. */
    fun isModelDownloaded(context: Context, fileName: String): Boolean =
        File(getModelsDirectory(context), fileName).let { it.exists() && it.length() > 0 }

    /** Absolute filesystem path for [fileName] inside the models directory. */
    fun getModelPath(context: Context, fileName: String): String =
        File(getModelsDirectory(context), fileName).absolutePath
}

View File

@@ -1,10 +1,12 @@
package net.mmanningau.alice package net.mmanningau.alice
import android.content.Context import android.content.Context
import android.net.Uri
import android.util.Log import android.util.Log
import com.chaquo.python.Python import com.chaquo.python.Python
import dev.langchain4j.agent.tool.JsonSchemaProperty import dev.langchain4j.agent.tool.JsonSchemaProperty
import dev.langchain4j.agent.tool.ToolSpecification import dev.langchain4j.agent.tool.ToolSpecification
import org.json.JSONArray
import org.json.JSONObject import org.json.JSONObject
import java.io.File import java.io.File
@@ -12,7 +14,11 @@ object SkillManager {
var skillsDirectory: File? = null var skillsDirectory: File? = null
private set private set
// *** ADDED: store context for ContentResolver access
private var appContext: Context? = null
fun initialize(context: Context) { fun initialize(context: Context) {
appContext = context.applicationContext // *** ADDED
val baseDir = context.getExternalFilesDir(null) val baseDir = context.getExternalFilesDir(null)
val skillsDir = File(baseDir, "Skills") val skillsDir = File(baseDir, "Skills")
@@ -25,7 +31,7 @@ object SkillManager {
fun updateDirectory(newPath: String) { fun updateDirectory(newPath: String) {
val newDir = File(newPath) val newDir = File(newPath)
if (!newDir.exists()) { if (!newDir.exists()) {
newDir.mkdirs() // Create it if the user typed a new path newDir.mkdirs()
} }
skillsDirectory = newDir skillsDirectory = newDir
Log.i("AliceSkills", "Skills directory updated to: ${newDir.absolutePath}") Log.i("AliceSkills", "Skills directory updated to: ${newDir.absolutePath}")
@@ -48,13 +54,12 @@ object SkillManager {
.name(name) .name(name)
.description(description) .description(description)
// Parse the expected parameters so the LLM knows what to extract
val parameters = json.optJSONObject("parameters") val parameters = json.optJSONObject("parameters")
val properties = parameters?.optJSONObject("properties") val properties = parameters?.optJSONObject("properties")
properties?.keys()?.forEach { key -> properties?.keys()?.forEach { key ->
val prop = properties.getJSONObject(key) val prop = properties.getJSONObject(key)
val type = prop.getString("type") // e.g., "string" val type = prop.getString("type")
val desc = prop.optString("description", "") val desc = prop.optString("description", "")
builder.addParameter( builder.addParameter(
@@ -85,19 +90,56 @@ object SkillManager {
val py = Python.getInstance() val py = Python.getInstance()
val builtins = py.builtins val builtins = py.builtins
// We create an isolated dictionary for the script to run in.
// This allows you to edit the Python files and have them hot-reload instantly!
val globals = py.getModule("builtins").callAttr("dict") val globals = py.getModule("builtins").callAttr("dict")
// Execute the raw script text
builtins.callAttr("exec", scriptFile.readText(), globals) builtins.callAttr("exec", scriptFile.readText(), globals)
// Find the 'execute' function we mandated in our python script
val executeFunc = globals.callAttr("get", "execute") val executeFunc = globals.callAttr("get", "execute")
if (executeFunc == null) return "Error: Python script missing 'def execute(args_json):' function." if (executeFunc == null) return "Error: Python script missing 'def execute(args):' function."
// Call it and return the string! // First call to Python
executeFunc.call(argumentsJson).toString() var result = executeFunc.call(argumentsJson).toString()
// *** ADDED: Two-pass bridge for skills that need Android ContentResolver
if (result.startsWith("BRIDGE_REQUEST:")) {
val ctx = appContext
if (ctx == null) {
return "Error: SkillManager context not initialized — cannot perform ContentResolver query."
}
val request = JSONObject(result.removePrefix("BRIDGE_REQUEST:"))
val uri = Uri.parse(request.getString("uri"))
val limit = request.optInt("limit", 10)
val columns = arrayOf("_id", "address", "body", "date", "type", "read")
val smsArray = JSONArray()
val cursor = ctx.contentResolver.query(
uri, columns, null, null, "date DESC"
)
cursor?.use {
var count = 0
while (it.moveToNext() && count < limit) {
val row = JSONObject()
row.put("address", it.getString(it.getColumnIndexOrThrow("address")) ?: "")
row.put("body", it.getString(it.getColumnIndexOrThrow("body")) ?: "")
row.put("date", it.getString(it.getColumnIndexOrThrow("date")) ?: "")
row.put("type", it.getString(it.getColumnIndexOrThrow("type")) ?: "1")
row.put("read", it.getString(it.getColumnIndexOrThrow("read")) ?: "0")
smsArray.put(row)
count++
}
}
Log.i("AliceSkills", "SMS bridge: fetched ${smsArray.length()} messages from $uri")
// Re-inject the data and call Python a second time
val injectedArgs = JSONObject(argumentsJson.ifBlank { "{}" })
injectedArgs.put("sms_data", smsArray)
result = executeFunc.call(injectedArgs.toString()).toString()
}
// *** END ADDED
result
} catch (e: Exception) { } catch (e: Exception) {
Log.e("AliceSkills", "Execution failed for $toolName", e) Log.e("AliceSkills", "Execution failed for $toolName", e)

View File

@@ -38,6 +38,6 @@ class AliceApp : Application() {
).allowMainThreadQueries().build() // We use allowMainThreadQueries for immediate boot loading ).allowMainThreadQueries().build() // We use allowMainThreadQueries for immediate boot loading
// Pass the DAO into the manager! // Pass the DAO into the manager!
LlmManager.initialize(db.chatDao(), savedMode, savedUrl, savedModel, savedApiKey, savedSystemPrompt) LlmManager.initialize(this,db.chatDao(), savedMode, savedUrl, savedModel, savedApiKey, savedSystemPrompt)
} }
} }

View File

@@ -1,3 +1,4 @@
<resources> <resources>
<string name="app_name">Alice</string> <string name="app_name">Alice</string>
<string name="accessibility_service_description">Alice Screen Reader Service</string>
</resources> </resources>

View File

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<accessibility-service xmlns:android="http://schemas.android.com/apk/res/android"
android:accessibilityEventTypes="typeWindowStateChanged|typeWindowContentChanged"
android:accessibilityFeedbackType="feedbackGeneric"
android:accessibilityFlags="flagDefault"
android:canRetrieveWindowContent="true"
android:description="@string/accessibility_service_description" />

View File

@@ -5,4 +5,5 @@ plugins {
alias(libs.plugins.kotlin.compose) apply false alias(libs.plugins.kotlin.compose) apply false
// Add the Chaquopy plugin here // Add the Chaquopy plugin here
id("com.chaquo.python") version "15.0.1" apply false id("com.chaquo.python") version "15.0.1" apply false
id("com.google.devtools.ksp") version "2.2.0-2.0.2" apply false
} }

View File

@@ -1,6 +1,6 @@
[versions] [versions]
agp = "8.13.2" agp = "8.13.2"
kotlin = "2.0.21" kotlin = "2.2.0"
coreKtx = "1.17.0" coreKtx = "1.17.0"
junit = "4.13.2" junit = "4.13.2"
junitVersion = "1.3.0" junitVersion = "1.3.0"