diff --git a/.github/instructions/html-lang.instructions.md b/.github/instructions/html-lang.instructions.md
new file mode 100644
index 00000000..b2537c6f
--- /dev/null
+++ b/.github/instructions/html-lang.instructions.md
@@ -0,0 +1,23 @@
+---
+applyTo: '**/*.html'
+---
+
+# HTML Language Guide
+
+- Use 4 spaces per indentation level. No tabs.
+
+- Use double quotes for all HTML attributes. Ex: `<div class="container">`
+
+- Self-closing tags should include the trailing slash. Ex: `<br />`
+
+- Use semantic HTML5 elements where appropriate. Ex: `<header>`, `<nav>`, `<main>`, `<footer>`
-
-
+
+
+
+
+
+
+
Choose the provider for this agent. Foundry agents cannot attach local actions.
@@ -83,131 +88,177 @@
Basic Information
Model & Connection
-
+
-
- Azure AI Foundry agents use Azure-managed tools; local actions are disabled for this agent type.
+
+ Foundry-managed tools are used for this agent type and local actions are disabled.
-
-
+
+
+
Endpoints come from global and workspace configurations.
+
+
+
+
-
-
+
+
+
Select a classic Foundry agent to import its identity.
-
-
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
This is populated from the selected endpoint or fetch response when available, but you can override it if your Foundry project requires a different supported Responses API version.
+
+
+
+
+
+
+
Fetch an application above to populate the published version automatically. The runtime invokes the application by name, so you do not need to type a version manually.
+
+
+
+
+
Optional for now. Phase 1 uses the Responses endpoint; activity polling can be added later.
+
-
+
-
+
+
+
+
-
-
-
-
-
-
-
-
Override global model connection settings
-
-
-
-
-
- Note: If a value is not provided for an inheritable property, it will be inherited from the selected model.
-
-
-
-
+ {% if not settings.enable_multi_model_endpoints %}
+
+
+
-
-
+
+
+
Override global model connection settings
-
-
-
-
- Inheritable
-
-
-
-
- Inheritable
-
-
-
-
- Inheritable
-
-
-
-
- Inheritable
-
-
-
-
- Optional
-
-
Only applies to models that support reasoning (e.g., gpt-5, o1, o3)
-
-
-
-
-
-
- Inheritable
-
+
+
+
+
+ Note: If a value is not provided for an inheritable property, it will be inherited from the selected model.
-
-
- Inheritable
-
+
+
+
+
+
+
+
-
-
- Inheritable
-
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
+
+
+ Optional
+
+
Only applies to models that support reasoning (e.g., gpt-5, o1, o3)
+
-
-
- Inheritable
-
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
+
+
+ Inheritable
+
+
-
+ {% endif %}
Instructions
- Instructions are managed in Azure AI Foundry.
+ Instructions are managed in Foundry for this agent type.
@@ -222,7 +273,7 @@
Instructions
- Actions are not available for Azure AI Foundry agents. Foundry-managed tools are used automatically.
+ Actions are not available for Foundry agents. Foundry-managed tools are used automatically.
@@ -257,7 +308,7 @@
Available Actions
-
+
@@ -270,7 +321,7 @@
Available Actions
-
+
@@ -287,11 +338,11 @@
Available Actions
-
+
-
+
@@ -325,27 +376,32 @@
Available Actions
Advanced Settings
-
-
-
-
Optional additional configuration settings for this agent in JSON format.
+
+ Advanced options are managed by Foundry and are not available for Foundry agents.
-
-
-
-
-
-
-
+
+
+
+
+
Optional additional configuration settings for this agent in JSON format.
-
-
-
-
- Specify the maximum number of tokens the model can generate in a single response.
- Set to -1 to use the model's default limit.
- Setting a higher cap only allows the model to generate longer answers—it does not force the extra tokens.
- Use this control mainly to keep answers concise or to shorten responses for sensitive channels.
+
+
+
+
+
+
+
+
+
+
+
+
+ Specify the maximum number of tokens the model can generate in a single response.
+ Set to -1 to use the model's default limit.
+ Setting a higher cap only allows the model to generate longer answers—it does not force the extra tokens.
+ Use this control mainly to keep answers concise or to shorten responses for sensitive channels.
+
diff --git a/application/single_app/templates/_multiendpoint_modal.html b/application/single_app/templates/_multiendpoint_modal.html
new file mode 100644
index 00000000..c505dd6d
--- /dev/null
+++ b/application/single_app/templates/_multiendpoint_modal.html
@@ -0,0 +1,133 @@
+
+
+
+
+
+
Model Endpoint
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ For APIM, choose the matching provider with API key auth. If using classic Foundry, use Foundry (classic). If using the application-based runtime, use New Foundry.
+
+
+
+
+
+
+
+
+
+
Project API versions use the v1 format (for example, v1).
+
+
+
+
+
OpenAI API versions use a dated format. For New Foundry, this value is used for Responses API calls and is inherited by the agent modal. No default is applied for New Foundry, so enter the supported version for your Foundry project explicitly.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ API key authentication is for inference only. Use a managed identity or service principal for model discovery.
+
{% endfor %}
{% else %}
- {# normal direct-Azure OpenAI flow: show whatever you fetched into settings.gpt_model.selected #}
- {% for model in settings.gpt_model.selected %}
-
- {% endfor %}
+ {% if settings.enable_gpt_apim %}
+ {# when using APIM, azure_apim_gpt_deployment may be "dep1" or "dep1,dep2,…" #}
+ {% set raw = settings.azure_apim_gpt_deployment or "" %}
+ {% set apim_list = raw.split(',') %}
+ {% for dep in apim_list %}
+ {% set d = dep.strip() %}
+
+ {% endfor %}
+ {% else %}
+ {# normal direct-Azure OpenAI flow: show whatever you fetched into settings.gpt_model.selected #}
+ {% for model in settings.gpt_model.selected %}
+
+ {% endfor %}
+ {% endif %}
{% endif %}
Group Prompts
- {% if settings.enable_semantic_kernel and settings.allow_group_agents %}
-
-
- Group Agents
-
-
+ {% if settings.per_user_semantic_kernel and settings.enable_semantic_kernel %}
+ {% if settings.allow_group_agents %}
+
+
+ Group Agents
+
+
+
+ {% if settings.enable_semantic_kernel and settings.allow_group_plugins %}
+
+
+ Group Actions
+
+
+ {% endif %}
{% endif %}
- {% if settings.enable_semantic_kernel and settings.allow_group_plugins %}
-
-
- Group Actions
-
-
+ {% if settings.allow_group_custom_endpoints %}
+
+
+ Group Endpoints
+
+
{% endif %}
@@ -742,7 +760,6 @@
Group Workspace
- {% if settings.enable_semantic_kernel and settings.allow_group_agents %}
Group Workspace
{% endif %}
+ {% if settings.enable_semantic_kernel and settings.allow_group_custom_endpoints and settings.enable_multi_model_endpoints %}
+
+
+
+
+
Group Model Endpoints
+
Manage model endpoints for this group. Global endpoints are managed by admins.
+
+
+
{% endif %}
+
+
diff --git a/application/single_app/utils_cache.py b/application/single_app/utils_cache.py
index 679d8334..2d474cd1 100644
--- a/application/single_app/utils_cache.py
+++ b/application/single_app/utils_cache.py
@@ -439,7 +439,7 @@ def get_cached_search_results(
logger.debug(f"Cache expired for key: {cache_key}")
try:
cosmos_search_cache_container.delete_item(item=cache_key, partition_key=partition_key)
- except:
+ except Exception as ex:
pass # Already deleted by TTL or doesn't exist
except CosmosResourceNotFoundError:
@@ -807,7 +807,7 @@ def get_cache_stats() -> Dict[str, Any]:
enable_cross_partition_query=True
))
expired_count = result_expired[0] if result_expired else 0
- except:
+ except Exception as ex:
expired_count = 0 # Ignore errors in counting expired
return {
diff --git a/docs/explanation/features/ADMIN_ACTIVITY_LOGGING.md b/docs/explanation/features/ADMIN_ACTIVITY_LOGGING.md
new file mode 100644
index 00000000..75db02dd
--- /dev/null
+++ b/docs/explanation/features/ADMIN_ACTIVITY_LOGGING.md
@@ -0,0 +1,41 @@
+# Admin Activity Logging (v0.236.017)
+
+## Overview and Purpose
+This feature adds a general-purpose activity log entry for admin actions so operational changes show up in the activity timeline.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.017**
+
+## Dependencies
+- Activity logs Cosmos container
+- Application Insights for telemetry
+
+## Technical Specifications
+### Architecture Overview
+- A helper function constructs a standardized activity record and writes it to the activity logs container.
+- Records include admin identity fields and a description for display in the UI timeline.
+
+### Configuration Options
+- None
+
+### File Structure
+- Logging helper: application/single_app/functions_activity_logging.py
+- Functional test: functional_tests/test_admin_action_activity_log.py
+
+## Usage Instructions
+### How to Log
+Call `log_general_admin_action()` with the admin user ID, admin email, and action string.
+Optionally pass a human-readable description and additional context.
+
+### Example
+- Action: "settings_updated"
+- Description: "Admin updated AI model settings"
+
+## Testing and Validation
+- Functional test: functional_tests/test_admin_action_activity_log.py
+
+## Known Limitations
+- Caller is responsible for invoking the helper when admin actions occur.
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.017**.
diff --git a/docs/explanation/features/CONTROL_CENTER_TOKEN_FILTERS.md b/docs/explanation/features/CONTROL_CENTER_TOKEN_FILTERS.md
new file mode 100644
index 00000000..85d3e803
--- /dev/null
+++ b/docs/explanation/features/CONTROL_CENTER_TOKEN_FILTERS.md
@@ -0,0 +1,54 @@
+# Control Center Token Filters
+
+Implemented in version: **0.239.164**
+
+## Overview and Purpose
+
+The Control Center dashboard now supports token-specific filtering so admins and control center users can explore token usage by the dimensions already captured in `token_usage` activity logs. This improves token analysis without requiring a separate analytics store.
+
+## Dependencies
+
+- `application/single_app/route_backend_control_center.py`
+- `application/single_app/templates/control_center.html`
+- `application/single_app/static/js/control-center.js`
+- `application/single_app/functions_activity_logging.py`
+- Cosmos `activity_logs` container token usage documents
+
+## Technical Specifications
+
+### Architecture Overview
+
+The token chart continues to use `token_usage` entries from the activity log container. The backend now parses optional token filters, applies them only to the token query path, and reuses the same filter payload for chart data, CSV export, and chat-export flows.
+
+### Supported Filters
+
+- User
+- Workspace type
+- Group
+- Public workspace
+- Model deployment name
+- Token type
+
+### File Structure
+
+- `route_backend_control_center.py`: token filter parsing, query helpers, token filter options endpoint, filtered export support
+- `control_center.html`: token filter controls added above the token chart
+- `control-center.js`: token filter state, filter option loading, request forwarding, and reset/apply behavior
+
+## Usage Instructions
+
+1. Open the Control Center dashboard.
+2. In the Token Usage card, choose one or more token filters.
+3. Click Apply to refresh the token chart with the selected scope.
+4. Use Reset to return to the unfiltered token view.
+5. Export activity trends to keep token CSV output aligned with the selected token filters.
+
+## Testing and Validation
+
+- Functional regression: `functional_tests/test_control_center_token_filters.py`
+- UI regression: `ui_tests/test_control_center_token_filters.py`
+
+## Known Limitations
+
+- Endpoint-level token filtering is not included in this version because endpoint metadata is not yet persisted in `token_usage` activity log records.
+- Token filters apply to the token chart and token export data only; the other dashboard charts remain global for the selected time range.
\ No newline at end of file
diff --git a/docs/explanation/features/DUAL_FOUNDRY_AGENT_SUPPORT.md b/docs/explanation/features/DUAL_FOUNDRY_AGENT_SUPPORT.md
new file mode 100644
index 00000000..c5a1b7b2
--- /dev/null
+++ b/docs/explanation/features/DUAL_FOUNDRY_AGENT_SUPPORT.md
@@ -0,0 +1,115 @@
+# Dual Foundry Agent Support
+
+Implemented in version: **0.239.154**
+
+Dependencies: Azure AI Foundry project endpoints, agent modal stepper, scoped agent CRUD routes, Semantic Kernel loader, Foundry runtime helpers
+
+## Overview
+
+SimpleChat now supports both Foundry experiences at the same time instead of forcing a migration path.
+
+- `aifoundry` remains the persisted classic Foundry agent type and is displayed in the UI as `Foundry (classic)`.
+- `new_foundry` is a new persisted agent type for the application-based Foundry experience.
+- The agent modal, schema validation, backend payload sanitizer, loader, and chat runtime now recognize both modes side by side.
+
+This change is intentionally additive. Existing classic Foundry agents continue to work without migration.
+
+## Technical Specifications
+
+### Architecture overview
+
+The implementation separates the two Foundry paths instead of trying to coerce them into one shared runtime contract.
+
+- Classic Foundry continues to use the existing SDK-backed agent invocation flow.
+- New Foundry uses a separate runtime path that calls the application Responses endpoint directly.
+- The loader instantiates different agent wrappers based on `agent_type`.
+- The modal stepper renders separate configuration fields for classic and new Foundry while preserving the local agent flow.
+
+### Backend and runtime changes
+
+Files modified:
+- `application/single_app/functions_agent_payload.py`
+- `application/single_app/static/json/schemas/agent.schema.json`
+- `application/single_app/foundry_agent_runtime.py`
+- `application/single_app/semantic_kernel_loader.py`
+- `application/single_app/route_backend_chats.py`
+- `application/single_app/route_backend_models.py`
+- `application/single_app/route_backend_agents.py`
+- `application/single_app/functions_settings.py`
+- `application/single_app/functions_global_agents.py`
+
+Key behaviors:
+- `sanitize_agent_payload()` now accepts `local`, `aifoundry`, and `new_foundry`.
+- Classic Foundry still uses `other_settings.azure_ai_foundry`.
+- New Foundry uses `other_settings.new_foundry` with application-centric fields such as `application_id`, `application_name`, `application_version`, and `responses_api_version`.
+- `route_backend_chats.py` dispatches both Foundry types through Foundry-style invocation instead of falling back to local handling.
+- `foundry_agent_runtime.py` adds a dedicated new Foundry runtime that calls `/applications/{application}/protocols/openai/responses`.
+
+### Frontend changes
+
+Files modified:
+- `application/single_app/templates/_agent_modal.html`
+- `application/single_app/templates/_multiendpoint_modal.html`
+- `application/single_app/static/js/agent_modal_stepper.js`
+- `application/single_app/static/js/agents_common.js`
+- `application/single_app/static/js/admin/admin_model_endpoints.js`
+- `application/single_app/static/js/workspace/workspace_model_endpoints.js`
+- `application/single_app/static/js/workspace/view-utils.js`
+
+Key behaviors:
+- The agent modal now exposes `Local`, `Foundry (classic)`, and `New Foundry`.
+- Classic Foundry keeps the endpoint fetch-and-select flow for existing Foundry agents.
+- New Foundry exposes application reference fields and Responses API version fields.
+- New Foundry now supports fetching project agents/applications to populate the application identifier fields.
+- Endpoint management now includes a `new_foundry` provider option.
+
+## Usage Instructions
+
+### How to configure classic Foundry
+
+1. Open the agent modal.
+2. Select `Foundry (classic)`.
+3. Choose a configured Foundry endpoint.
+4. Fetch and select a classic Foundry agent.
+5. Save the agent.
+
+### How to configure new Foundry
+
+1. Open the agent modal.
+2. Select `New Foundry`.
+3. Choose or enter the Foundry project endpoint and project name.
+4. Enter the application identifier from the new Foundry portal.
+5. Enter the Responses API version.
+6. Save the agent.
+
+### Scope support
+
+Both classic and new Foundry agent types are supported in all three scopes:
+- Personal agents
+- Group agents
+- Global agents
+
+## Testing and Validation
+
+Functional coverage:
+- `functional_tests/test_dual_foundry_agent_support.py`
+
+UI coverage:
+- `ui_tests/test_agent_modal_dual_foundry_modes.py`
+
+Validation focus:
+- Classic Foundry payloads remain valid.
+- New Foundry payloads validate through the schema and sanitizer.
+- Runtime and loader files contain explicit support for `new_foundry`.
+- The modal exposes both Foundry modes and type-specific fields.
+
+## Known Limitations
+
+- Phase 1 does not add activity protocol polling yet.
+- Phase 1 expects existing new Foundry portal applications to already exist.
+- The dedicated web search Foundry configuration remains on the classic Foundry path.
+
+## Related Config Version Update
+
+The application version in `application/single_app/config.py` was incremented to `0.239.154` as part of this feature.
diff --git a/docs/explanation/features/LOGGED_CORE_SEMANTIC_KERNEL_PLUGINS.md b/docs/explanation/features/LOGGED_CORE_SEMANTIC_KERNEL_PLUGINS.md
new file mode 100644
index 00000000..5549cd7b
--- /dev/null
+++ b/docs/explanation/features/LOGGED_CORE_SEMANTIC_KERNEL_PLUGINS.md
@@ -0,0 +1,71 @@
+# Logged Core Semantic Kernel Plugins
+
+Implemented in version: **0.239.153**
+
+## Overview and Purpose
+
+This feature moves SimpleChat's built-in Semantic Kernel core plugins onto local subclasses that emit plugin invocation logs through the existing plugin invocation logger. The change covers the Time, Wait, Math, and Text plugins and ensures that core-plugin invocations can now surface as thought records, not just custom plugin calls.
+
+## Dependencies
+
+- `semantic_kernel_plugins.plugin_invocation_logger`
+- `semantic_kernel_plugins.plugin_invocation_thoughts`
+- `semantic_kernel_loader.py`
+- `route_backend_chats.py`
+- Semantic Kernel upstream core plugins from the `semantic-kernel` repository
+
+## Technical Specifications
+
+### Architecture Overview
+
+SimpleChat now owns thin subclasses for the upstream Semantic Kernel `TimePlugin`, `WaitPlugin`, `MathPlugin`, and `TextPlugin`. Each subclass lives in `application/single_app/semantic_kernel_plugins/` and calls the shared `auto_wrap_plugin_functions()` helper during initialization so inherited kernel functions receive the same invocation logging behavior as custom SimpleChat plugins.
+
+### Configuration and Loader Flow
+
+`semantic_kernel_loader.py` now imports the local SimpleChat versions of the Time and Wait plugins and continues to use the local Math and Text plugin modules. This means all existing loader paths that call `load_time_plugin()`, `load_wait_plugin()`, `load_math_plugin()`, and `load_text_plugin()` now register the logged subclasses automatically.
+
+### Thought Integration
+
+Thought formatting and callback registration were extracted into `semantic_kernel_plugins/plugin_invocation_thoughts.py`. The chat route reuses that helper in non-streaming agent execution, streaming agent execution, and the kernel-only Semantic Kernel fallback path so logged core-plugin invocations can generate `agent_tool_call` thoughts consistently. The formatter now emits user-readable summaries for wait and math operations, a parameter-aware fallback summary for other plugin calls, and an explicit `Invoking Plugin.Function` thought at the start of tool execution.
+
+### File Structure
+
+- `application/single_app/semantic_kernel_plugins/time_plugin.py`
+- `application/single_app/semantic_kernel_plugins/wait_plugin.py`
+- `application/single_app/semantic_kernel_plugins/math_plugin.py`
+- `application/single_app/semantic_kernel_plugins/text_plugin.py`
+- `application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py`
+- `application/single_app/semantic_kernel_plugins/plugin_invocation_thoughts.py`
+- `application/single_app/semantic_kernel_plugins/logged_plugin_loader.py`
+- `application/single_app/semantic_kernel_loader.py`
+- `application/single_app/route_backend_chats.py`
+
+## Usage Instructions
+
+No additional configuration is required beyond the existing plugin enablement flags. When the Time, Wait, Math, or Text plugins are enabled in settings, the loader now registers the logged SimpleChat subclasses automatically.
+
+For runtime behavior:
+
+- Plugin invocations are recorded through the shared plugin invocation logger.
+- Agent and kernel-only chat paths can register thought callbacks using `register_plugin_invocation_thought_callback()`.
+- Thought content now includes human-readable operation summaries when possible, such as wait duration and math expressions/results, plus generic parameter summaries for other plugins.
+- Streaming chat now polls pending thoughts while a response is still in flight so long-running tool calls can replace the active status badge before any content tokens are returned.
+- Existing citation extraction still reads from the same invocation logger history.
+
+## Testing and Validation
+
+Functional coverage was added in `functional_tests/test_logged_core_plugins.py`.
+
+The test validates:
+
+- Inherited upstream methods are logged after auto-wrapping.
+- SimpleChat-specific Math extensions remain available and logged.
+- Async logging still works for the Wait plugin.
+- Invocation callbacks can be transformed into thought records for both success and failure cases.
+- Human-readable thought formatting includes meaningful wait, math, and generic plugin execution summaries.
+
+## Known Limitations
+
+- The standard upstream `HttpPlugin` fallback is not part of this first pass because SimpleChat already prefers `SmartHttpPlugin`.
+- Prompt-based core plugins such as conversation summarization were intentionally left out to keep the change focused on native function plugins.
\ No newline at end of file
diff --git a/docs/explanation/features/MODEL_ENDPOINT_API_KEY_MANUAL_MODELS.md b/docs/explanation/features/MODEL_ENDPOINT_API_KEY_MANUAL_MODELS.md
new file mode 100644
index 00000000..70992095
--- /dev/null
+++ b/docs/explanation/features/MODEL_ENDPOINT_API_KEY_MANUAL_MODELS.md
@@ -0,0 +1,50 @@
+# Model Endpoint API Key Manual Models (v0.236.019)
+
+## Overview and Purpose
+Adds manual model entry for API key-authenticated endpoints, with per-model connection tests and guidance to prefer identity-based discovery.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.019**
+
+## Dependencies
+- Admin model endpoint modal
+- Backend model test endpoint
+- Azure OpenAI / Foundry inference clients
+
+## Technical Specifications
+### Architecture Overview
+- API key endpoints skip discovery and allow manual model entries.
+- Each model row supports per-model connection testing.
+- Service principal auth includes management cloud and custom authority inputs.
+
+### API Endpoints
+- `/api/models/test-model` — tests a specific model deployment using the endpoint settings.
+
+### Configuration Options
+- `auth.management_cloud` — Public, Government, or Custom authority.
+- `auth.custom_authority` — custom authority URL for service principal auth.
+
+### File Structure
+- Modal UI: application/single_app/templates/admin_settings.html
+- Modal logic: application/single_app/static/js/admin/admin_model_endpoints.js
+- Backend test endpoint: application/single_app/route_backend_models.py
+
+## Usage Instructions
+### API Key Flow
+1. Choose Authentication Type: API Key.
+2. Use Add Model to enter deployment name, display name, and description.
+3. Use the per-model Test Connection button to verify access.
+
+### Service Principal Flow
+1. Choose Authentication Type: Service Principal.
+2. Select Management Cloud (Public/Government/Custom).
+3. For Custom, enter the authority URL.
+
+## Testing and Validation
+- Functional test: functional_tests/test_model_endpoints_api_key_manual_models.py
+
+## Known Limitations
+- API key auth supports inference only; discovery requires identity-based auth.
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.019**.
diff --git a/docs/explanation/features/MSGRAPH_PLUGIN_OPERATIONS.md b/docs/explanation/features/MSGRAPH_PLUGIN_OPERATIONS.md
new file mode 100644
index 00000000..271f0078
--- /dev/null
+++ b/docs/explanation/features/MSGRAPH_PLUGIN_OPERATIONS.md
@@ -0,0 +1,64 @@
+# Microsoft Graph Plugin Operations
+
+Implemented in version: **0.239.171**
+
+## Overview and Purpose
+
+The Microsoft Graph plugin now provides a stronger foundation for Graph-backed agent operations and adds practical read-focused capabilities for profile, calendar, mail, directory, and OneDrive access. The goal is to make the plugin more useful for day-to-day assistant workflows while handling consent and Graph list responses more safely.
+
+## Dependencies
+
+- `application/single_app/semantic_kernel_plugins/msgraph_plugin.py`
+- `application/single_app/functions_authentication.py`
+- `application/single_app/semantic_kernel_plugins/base_plugin.py`
+- `application/single_app/semantic_kernel_plugins/plugin_invocation_logger.py`
+- Microsoft Graph delegated permissions for the enabled operations
+
+## Technical Specifications
+
+### Architecture Overview
+
+The plugin now routes Graph calls through a shared request helper that:
+
+- acquires access tokens through the plugin-aware authentication helper
+- supports scoped overrides from the plugin manifest
+- builds common OData parameters consistently
+- handles pagination for Graph list responses
+- returns structured error payloads for consent, throttling, and HTTP failures
+
+### Operations Added or Enhanced
+
+- `get_my_profile`
+- `get_my_events`
+- `get_my_messages`
+- `search_users`
+- `get_user_by_email`
+- `list_drive_items`
+- `get_my_security_alerts`
+
+### File Structure
+
+- `msgraph_plugin.py`: Graph request helper, token handling, OData support, and operation methods
+- `functions_authentication.py`: delegated token acquisition and consent flow support
+
+## Usage Instructions
+
+1. Register the plugin with a Graph endpoint manifest.
+2. Ensure delegated Microsoft Graph permissions are granted for the desired operations.
+3. Use read operations first to confirm token scope coverage.
+4. If consent is required, surface the returned consent URL to the user and retry after consent is granted.
+
+## Testing and Validation
+
+- Functional regression: `functional_tests/test_msgraph_plugin_operations.py`
+
+## Performance Considerations
+
+- Graph list results are capped per request to avoid oversized agent payloads.
+- Pagination is followed only up to a bounded number of pages.
+- OData field selection should be used to reduce response size whenever possible.
+
+## Known Limitations
+
+- The plugin currently focuses on read-heavy operations and does not yet add write operations such as sending mail or creating events.
+- Security alert access still requires elevated delegated permissions and may not be appropriate for all tenants.
\ No newline at end of file
diff --git a/docs/explanation/features/NEW_FOUNDRY_REST_STREAMING.md b/docs/explanation/features/NEW_FOUNDRY_REST_STREAMING.md
new file mode 100644
index 00000000..24fee1eb
--- /dev/null
+++ b/docs/explanation/features/NEW_FOUNDRY_REST_STREAMING.md
@@ -0,0 +1,69 @@
+# New Foundry REST Streaming
+
+## Overview
+
+Version implemented: **0.239.175**
+
+This change adds REST-based streaming support for New Foundry application agents while preserving the existing Semantic Kernel-based path for classic Azure AI Foundry agents.
+
+The goal is to let the app connect to classic and new Foundry simultaneously without taking a runtime dependency on Microsoft Agent Framework or forcing the whole app onto a newer `azure-ai-projects` version that conflicts with Semantic Kernel.
+
+## Dependencies
+
+- Semantic Kernel for classic Foundry agent execution
+- Azure Identity credentials for both classic and new Foundry authentication
+- Direct REST calls to the New Foundry Responses and project agent-list endpoints
+- Existing chat SSE pipeline in the app
+
+## Technical Specification
+
+### Architecture
+
+- Classic Foundry remains on the existing Semantic Kernel + `azure-ai-projects` 1.x path.
+- New Foundry uses app-local REST transport in `application/single_app/foundry_agent_runtime.py`.
+- New Foundry discovery uses the project `/agents` REST endpoint instead of SDK-only discovery.
+- The streaming route in `application/single_app/route_backend_chats.py` now forwards agent deltas as they are produced instead of buffering all chunks first.
+
+### Files Updated
+
+- `application/single_app/foundry_agent_runtime.py`
+- `application/single_app/route_backend_chats.py`
+- `application/single_app/route_backend_models.py`
+- `functional_tests/test_new_foundry_fetch_support.py`
+- `functional_tests/test_new_foundry_streaming_runtime.py`
+
+### Runtime Behavior
+
+- New Foundry application requests use the Responses protocol endpoint with `stream=true` for live streaming.
+- SSE events are parsed server-side and token deltas are forwarded through the app's `/api/chat/stream` endpoint.
+- Final model metadata and citations are still attached to the persisted assistant message when the stream completes.
+
+## Usage
+
+### Configuration
+
+Configure classic and new Foundry endpoints side by side through the existing model endpoint configuration.
+
+- Use provider `aifoundry` for classic Foundry agents.
+- Use provider `new_foundry` for New Foundry applications.
+
+### User Workflow
+
+1. Select a classic or new Foundry-backed agent.
+2. Send a chat message through the normal chat UI.
+3. If the selected agent is New Foundry, the app streams token deltas through `/api/chat/stream`.
+4. On completion, the final assistant message includes the full content, model metadata, and any citations captured from the runtime.
+
+## Testing and Validation
+
+- Functional coverage verifies New Foundry fetch support remains available.
+- Functional coverage verifies the runtime exposes a REST streaming executor and that the stream route consumes agent chunks incrementally.
+- Existing classic Foundry and general stream-route behavior remain on their previous code paths.
+
+## Known Limitations
+
+- Classic Foundry still depends on the existing Semantic Kernel integration and its current Azure SDK boundary.
+- `chat_api` still exists for compatibility scenarios outside the initial New Foundry stream path migration.
+- Real-time citation SSE events are not expanded in this phase; citations are still finalized when the response completes.
\ No newline at end of file
diff --git a/docs/explanation/features/WORKSPACE_MULTI_ENDPOINTS.md b/docs/explanation/features/WORKSPACE_MULTI_ENDPOINTS.md
new file mode 100644
index 00000000..06d816df
--- /dev/null
+++ b/docs/explanation/features/WORKSPACE_MULTI_ENDPOINTS.md
@@ -0,0 +1,65 @@
+# Workspace Multi-Endpoints (Version 0.236.045)
+
+## Overview
+Workspace multi-endpoint management extends the admin multi-endpoint system to personal and group workspaces. Users can configure workspace endpoints that live alongside global endpoints, and agent model selection is driven by these combined lists.
+
+**Implemented in version: 0.236.045**
+
+## Dependencies
+- Global model endpoints configured in admin settings
+- Workspace endpoint storage in user settings and group documents
+- Agent modal updates for multi-endpoint and Foundry agent discovery
+
+## Technical Specifications
+### Architecture Overview
+- Global endpoints remain in application settings.
+- Personal endpoints are stored in user settings under `personal_model_endpoints`.
+- Group endpoints are stored on group documents under `model_endpoints`.
+- Agent modal requests a combined, sanitized endpoint list for model selection.
+- Foundry agent lookup uses endpoint IDs to resolve authentication and list agents.
+
+### API Endpoints
+- `GET /api/user/model-endpoints` / `POST /api/user/model-endpoints`
+- `GET /api/group/model-endpoints` / `POST /api/group/model-endpoints`
+- `GET /api/user/agent/settings`
+- `GET /api/group/agent/settings`
+- `POST /api/models/foundry/agents`
+- `POST /api/user/models/fetch` / `POST /api/user/models/test-model`
+- `POST /api/group/models/fetch` / `POST /api/group/models/test-model`
+
+### Configuration
+- Global toggle: `enable_multi_model_endpoints` in [application/single_app/config.py](application/single_app/config.py)
+- Workspace endpoints stored per user and per group
+
+### File Structure
+- Frontend templates: [application/single_app/templates/workspace.html](application/single_app/templates/workspace.html), [application/single_app/templates/group_workspaces.html](application/single_app/templates/group_workspaces.html), [application/single_app/templates/_agent_modal.html](application/single_app/templates/_agent_modal.html)
+- Frontend logic: [application/single_app/static/js/workspace/workspace_model_endpoints.js](application/single_app/static/js/workspace/workspace_model_endpoints.js), [application/single_app/static/js/agent_modal_stepper.js](application/single_app/static/js/agent_modal_stepper.js)
+- Backend: [application/single_app/route_backend_models.py](application/single_app/route_backend_models.py), [application/single_app/route_backend_agents.py](application/single_app/route_backend_agents.py), [application/single_app/semantic_kernel_loader.py](application/single_app/semantic_kernel_loader.py)
+
+## Usage Instructions
+### Enable/Configure
+1. Admin enables multi-endpoint model management in admin settings.
+2. Users open Personal Workspace or Group Workspace and add endpoints under the new Workspace/Group Model Endpoints card.
+3. In the agent modal, select a model from the combined endpoint list.
+
+### Foundry Agent Import
+1. Select an Azure AI Foundry endpoint in the Foundry section of the agent modal.
+2. Click **Fetch Agents** to list available agents.
+3. Select an agent to import its identity, then save.
+
+### User Workflows
+- Personal agents can select models from global + personal endpoints.
+- Group agents can select models from global + group endpoints.
+- Foundry agents auto-populate identities from the selected Foundry endpoint.
+
+## Testing and Validation
+- Functional test: [functional_tests/test_workspace_multi_endpoints.py](functional_tests/test_workspace_multi_endpoints.py)
+- Manual validation:
+ - Add workspace endpoints and ensure they appear in agent model dropdowns.
+ - Verify Foundry agent list import using configured endpoints.
+
+## Performance Considerations
+- Model discovery uses on-demand API calls to Azure/Foundry endpoints.
+
+## Known Limitations
+- Workspace endpoints require configured credentials; only stored secrets are used for runtime resolution.
diff --git a/docs/explanation/features/v0.229.001/COMPREHENSIVE_UI_PERFORMANCE_ENHANCEMENTS.md b/docs/explanation/features/v0.229.001/COMPREHENSIVE_UI_PERFORMANCE_ENHANCEMENTS.md
index 79f5002c..ce04b507 100644
--- a/docs/explanation/features/v0.229.001/COMPREHENSIVE_UI_PERFORMANCE_ENHANCEMENTS.md
+++ b/docs/explanation/features/v0.229.001/COMPREHENSIVE_UI_PERFORMANCE_ENHANCEMENTS.md
@@ -910,7 +910,7 @@ class CitationGenerator:
from datetime import datetime
date_obj = datetime.fromisoformat(creation_date.replace('Z', '+00:00'))
citation_parts.append(f"({date_obj.year})")
- except:
+ except Exception as ex:
pass
citation_parts.append(f"Retrieved from {url}")
diff --git a/docs/explanation/fixes/AGENT_DROPDOWN_SCOPE_BY_CONVERSATION_METADATA_FIX.md b/docs/explanation/fixes/AGENT_DROPDOWN_SCOPE_BY_CONVERSATION_METADATA_FIX.md
new file mode 100644
index 00000000..10753fec
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_DROPDOWN_SCOPE_BY_CONVERSATION_METADATA_FIX.md
@@ -0,0 +1,34 @@
+# Agent Dropdown Scope by Conversation Metadata Fix (0.236.063)
+
+## Issue Description
+Personal agents were hidden in chat even when the current conversation was personal. Filtering relied on global group state rather than the active conversation scope, so group context suppressed personal agents.
+
+## Root Cause Analysis
+The agent dropdown used `activeGroupId` to decide scope. The chat page always renders an active group ID, so personal conversations were mistakenly treated as group scope. The UI already stores the conversation scope in `data-chat-type`, but the dropdown ignored it.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.063**
+
+## Technical Details
+- **Files modified**:
+ - application/single_app/static/js/chat/chat-agents.js
+ - application/single_app/static/js/chat/chat-retry.js
+ - application/single_app/config.py
+- **Change summary**:
+ - Scope is derived from the active conversation's `data-chat-type`.
+ - New conversations with no metadata show all agents.
+ - Group scope shows group + global agents only; other scopes show personal + global.
+
+## Testing Approach
+- Added functional test: functional_tests/test_agent_dropdown_scope_by_conversation_metadata.py
+- Test validates the metadata-based scope logic is present in both chat dropdown scripts.
+
+## Impact Analysis
+- Personal agents are visible in personal conversations.
+- Group agents are shown only in group conversations.
+- New conversations default to showing all agents.
+
+## Validation
+- Start a new chat: personal + group + global agents appear.
+- Open a group chat: only group + global agents appear.
+- Open a personal chat: only personal + global agents appear.
diff --git a/docs/explanation/fixes/AGENT_GPT_INIT_MULTIENDPOINT_GATING_FIX.md b/docs/explanation/fixes/AGENT_GPT_INIT_MULTIENDPOINT_GATING_FIX.md
new file mode 100644
index 00000000..99f454d3
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_GPT_INIT_MULTIENDPOINT_GATING_FIX.md
@@ -0,0 +1,31 @@
+# Agent GPT Init Multi-Endpoint Gating Fix (Version 0.236.052)
+
+## Overview
+Ensures multi-endpoint GPT resolution is skipped for agent-based requests and avoids APIM initialization failures when no model deployment is provided.
+
+## Issue Description
+Agent chat requests include `agent_info` but no `model_id` or `model_deployment`. With APIM enabled and multiple deployments configured, GPT initialization failed before agent invocation, blocking the request.
+
+## Root Cause Analysis
+`resolve_multi_endpoint_gpt_config` runs early in chat initialization, and APIM requires `model_deployment` when multiple deployments are configured. Agent requests do not send these fields, causing premature failures.
+
+## Fix Summary
+- Skip multi-endpoint GPT resolution when `agent_info` is present.
+- Default to the first APIM deployment for agent requests without `model_deployment`.
+
+## Files Modified
+- application/single_app/route_backend_chats.py
+- application/single_app/config.py
+- functional_tests/test_agent_gpt_init_skips_multiendpoint.py
+
+## Testing
+- Added functional test: `test_agent_gpt_init_skips_multiendpoint.py`.
+
+## Validation
+- Verified GPT init gating logs and APIM defaulting behavior for agent requests.
+
+## Version
+Fixed/Implemented in version: **0.236.052**
+
+## Config Version Reference
+`config.py` updated to `VERSION = "0.236.052"`.
diff --git a/docs/explanation/fixes/AGENT_MODAL_MODEL_ENDPOINT_FILTERING_FIX.md b/docs/explanation/fixes/AGENT_MODAL_MODEL_ENDPOINT_FILTERING_FIX.md
new file mode 100644
index 00000000..8e57cff3
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_MODAL_MODEL_ENDPOINT_FILTERING_FIX.md
@@ -0,0 +1,30 @@
+# Agent Modal Model Endpoint Filtering Fix (Version 0.236.056)
+
+## Issue Description
+Local agent modals did not show non-Azure OpenAI endpoints when selecting a model. This made Azure AI Foundry models unavailable in the modal even when multi-endpoint settings returned them.
+
+## Root Cause Analysis
+The agent modal dropdown filtered model endpoints by provider, allowing only Azure OpenAI providers for local agents. Foundry endpoints were excluded even when provided by the settings API.
+
+## Fix Summary
+- Allow local agents to include non-AOAI providers in the modal dropdown.
+- Normalize model identifiers and display labels so entries without explicit IDs still render.
+
+## Technical Details
+- Files modified:
+ - application/single_app/static/js/agents_common.js
+ - application/single_app/config.py
+- Updated provider filtering logic to only restrict when the agent type is explicitly `aifoundry`.
+- Added model ID and display name normalization to improve dropdown resilience.
+
+## Testing
+- Functional test: functional_tests/test_agent_modal_model_endpoint_filtering.py
+
+## Impact Analysis
+- Local agent modals now list Foundry endpoints wherever the modal is used (admin, user, group).
+- Existing AOAI behavior is unchanged.
+
+## Fixed/Implemented in version: **0.236.056**
+
+## Config Version Update
+- Updated VERSION in application/single_app/config.py to 0.236.056.
diff --git a/docs/explanation/fixes/AGENT_MODAL_MULTIENDPOINT_FOUNDARY_NOTICE_FIX.md b/docs/explanation/fixes/AGENT_MODAL_MULTIENDPOINT_FOUNDARY_NOTICE_FIX.md
new file mode 100644
index 00000000..ec9a7c8c
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_MODAL_MULTIENDPOINT_FOUNDARY_NOTICE_FIX.md
@@ -0,0 +1,33 @@
+# Agent Modal Multi-Endpoint & Foundry Notice Fix (Version 0.236.054)
+
+## Overview
+Hides custom connection controls in the agent modal when multi-endpoint model management is enabled and adds an advanced settings notice for Azure AI Foundry agents.
+
+## Issue Description
+When multi-endpoint model management is enabled, custom connection fields should not be available in the agent modal. Additionally, Azure AI Foundry agents need a clear notice that advanced settings are managed by Foundry.
+
+## Root Cause Analysis
+The agent modal always rendered custom connection controls and lacked a Foundry-specific advanced settings notice.
+
+## Fix Summary
+- Hide custom connection toggle and fields via Jinja when multi-endpoint model management is enabled.
+- Add an advanced settings notice for Foundry agents and toggle visibility in the modal stepper.
+
+## Files Modified
+- application/single_app/templates/_agent_modal.html
+- application/single_app/static/js/agent_modal_stepper.js
+- application/single_app/config.py
+- functional_tests/test_agent_modal_multiendpoint_foundry_advanced_notice.py
+
+## Testing
+- Added functional test: `test_agent_modal_multiendpoint_foundry_advanced_notice.py`.
+
+## Validation
+- Verified the custom connection fields are gated by the multi-endpoint setting.
+- Verified the Foundry advanced notice is toggled when selecting Foundry agent type.
+
+## Version
+Fixed/Implemented in version: **0.236.054**
+
+## Config Version Reference
+`config.py` updated to `VERSION = "0.236.054"`.
diff --git a/docs/explanation/fixes/AGENT_PAYLOAD_FIELD_LENGTHS_FIX.md b/docs/explanation/fixes/AGENT_PAYLOAD_FIELD_LENGTHS_FIX.md
new file mode 100644
index 00000000..e7f38582
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_PAYLOAD_FIELD_LENGTHS_FIX.md
@@ -0,0 +1,27 @@
+# Agent Payload Field Lengths Fix (Version 0.237.009)
+
+## Header Information
+- **Fix Title:** Agent payload field length validation
+- **Issue Description:** Agent payload validation did not enforce length limits, allowing oversized values into storage.
+- **Root Cause Analysis:** Length checks existed but were never invoked in `sanitize_agent_payload`, and no limits covered Azure-specific fields.
+- **Fixed/Implemented in version:** **0.237.009**
+- **Config Version Updated:** `config.py` VERSION set to **0.237.009**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/functions_agent_payload.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Added max length recommendations for Azure OpenAI and APIM fields.
+ - Validated field lengths in `sanitize_agent_payload` and for Foundry settings.
+ - Bumped application version in config.py.
+- **Testing Approach:**
+ - Added a functional test to confirm validation wiring and limits are present.
+
+## Validation
+- **Test Results:** functional_tests/test_agent_payload_field_lengths.py
+- **Before/After Comparison:**
+ - Before: Oversized agent fields could pass validation.
+ - After: Oversized fields raise `AgentPayloadError` with a clear message.
+- **User Experience Improvements:**
+ - Prevents invalid payloads and provides consistent validation feedback.
diff --git a/docs/explanation/fixes/AGENT_SELECTION_MODEL_ROUTING_FIX.md b/docs/explanation/fixes/AGENT_SELECTION_MODEL_ROUTING_FIX.md
new file mode 100644
index 00000000..ed645b7d
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_SELECTION_MODEL_ROUTING_FIX.md
@@ -0,0 +1,56 @@
+# Agent Selection Model Routing Fix (v0.239.173)
+
+Fixed/Implemented in version: **0.239.173**
+
+### Issue Description
+
+Per-user Semantic Kernel requests could fall back into model-only mode even when the selected
+personal agent still existed. In that state, chats skipped agent invocation entirely and used the
+standard GPT routing path instead.
+
+### Root Cause Analysis
+
+The per-user agent loader fetched personal agents correctly, merged any group agents, and then
+reset `agents_cfg` back to an empty list before global-agent merge and agent selection. When
+`merge_global_semantic_kernel_with_workspace` was enabled, that left only global agents in the
+candidate set. Personal agents like `graph` were therefore invisible during selection even though
+they were present in Cosmos.
+
+### Technical Details
+
+Files modified:
+- `application/single_app/route_backend_agents.py`
+- `application/single_app/semantic_kernel_loader.py`
+- `application/single_app/route_backend_chats.py`
+- `application/single_app/config.py`
+- `functional_tests/test_agent_selection_recovery.py`
+
+Code changes summary:
+- Added server-side validation so `/api/user/settings/selected_agent` only saves agents that are
+ actually selectable for the current user and scope.
+- Removed the candidate-list reset so personal agents remain available during merge and selection.
+- Normalized per-user chat selection handling so dict-based `selected_agent` settings resolve to
+ agent names correctly in the chat route.
+- Bumped the application version and added regression coverage.
+
+Testing approach:
+- Added `functional_tests/test_agent_selection_recovery.py` to verify personal agents remain in
+ the loader candidate set and invalid selections are rejected when saved.
+
+Impact analysis:
+- Prevents valid personal agents from being dropped during per-user merge logic.
+- Keeps agent-routed chats on the agent’s configured model instead of dropping into the standard
+ model-only fallback path.
+
+### Validation
+
+Before:
+- Personal agents were loaded, but then removed from `agents_cfg` before selection.
+- Requests then used the normal GPT model path rather than the selected agent model.
+
+After:
+- Personal agents remain in the per-user candidate set during global merge.
+- Invalid agent selections are rejected when saved.
+- Agent-enabled chats remain on the intended agent invocation path.
\ No newline at end of file
diff --git a/docs/explanation/fixes/AGENT_TEMPLATE_APPROVALS_AND_NOTIFICATIONS_FIX.md b/docs/explanation/fixes/AGENT_TEMPLATE_APPROVALS_AND_NOTIFICATIONS_FIX.md
new file mode 100644
index 00000000..933c5f14
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_TEMPLATE_APPROVALS_AND_NOTIFICATIONS_FIX.md
@@ -0,0 +1,47 @@
+# Agent Template Approvals And Notifications Fix (Version 0.239.163)
+
+## Header Information
+- **Fix Title:** Shared approvals page for agent template review and approval notification cleanup
+- **Issue Description:** Agent template approvals were managed only inside Admin Settings, and approval notifications did not consistently notify submitters or clear stale admin pending notices.
+- **Root Cause Analysis:** The agent template review queue lived in a separate admin page, while notification cleanup logic did not handle assignment-scope notification partitions used for reviewer work queues.
+- **Fixed/Implemented in version:** **0.239.163**
+- **Config Version Updated:** `config.py` VERSION set to **0.239.163**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/functions_notifications.py
+ - application/single_app/functions_approvals.py
+ - application/single_app/functions_agent_templates.py
+ - application/single_app/route_backend_agent_templates.py
+ - application/single_app/route_frontend_control_center.py
+ - application/single_app/templates/approvals.html
+ - application/single_app/templates/admin_settings.html
+ - application/single_app/config.py
+ - functional_tests/test_approval_notification_routing_fix.py
+ - ui_tests/test_approvals_agent_template_admin_section.py
+- **Code Changes Summary:**
+ - Added notification types for requester pending states and agent template review outcomes.
+ - Added shared notification partition resolution and metadata-based cleanup helpers so assignment-scope reviewer notifications can be removed reliably.
+ - Notified approval request submitters when requests enter pending status and kept result notifications for approved and denied outcomes.
+ - Added agent template notifications for pending review, approved, declined, and deleted outcomes.
+ - Updated rejection notifications to include the reviewer-provided reason directly in the notification message.
+ - Added read-time notification message enrichment so older generic rejection notifications also display their stored rejection reasons.
+ - Added activity log entries for agent template submissions, approvals, rejections, and deletions.
+ - Replaced the native browser delete confirmation with a reusable Bootstrap confirmation modal for template deletion flows.
+ - Moved the admin template review queue and existing review modal onto `/approvals` while leaving configuration toggles in Admin Settings.
+ - Bumped application version in `config.py`.
+- **Testing Approach:**
+ - Added a functional test covering approval and agent template notification routing plus stale reviewer notification cleanup.
+ - Added a UI test covering the admin-only agent template approvals section on `/approvals`.
+
+## Validation
+- **Test Results:**
+ - functional_tests/test_approval_notification_routing_fix.py
+ - ui_tests/test_approvals_agent_template_admin_section.py
+- **Before/After Comparison:**
+ - Before: Admins reviewed templates from Admin Settings, submitters were not notified when requests entered pending state, and reviewer pending notifications could linger after a decision.
+ - After: Admins review templates from `/approvals`, submitters receive pending and result notifications, and reviewer pending notifications are cleared when an approval is resolved.
+- **User Experience Improvements:**
+ - Centralized approval workflows for admins.
+ - Clearer notification lifecycle for both request submitters and reviewers.
+ - Reduced stale notification noise after approval decisions.
\ No newline at end of file
diff --git a/docs/explanation/fixes/AGENT_TEMPLATE_MAX_LENGTHS_FIX.md b/docs/explanation/fixes/AGENT_TEMPLATE_MAX_LENGTHS_FIX.md
new file mode 100644
index 00000000..71e1f0de
--- /dev/null
+++ b/docs/explanation/fixes/AGENT_TEMPLATE_MAX_LENGTHS_FIX.md
@@ -0,0 +1,27 @@
+# Agent Template Max Lengths Fix (Version 0.237.010)
+
+## Header Information
+- **Fix Title:** Agent template max length validation
+- **Issue Description:** Agent template updates did not enforce length limits, allowing oversized fields into storage.
+- **Root Cause Analysis:** Length checks were missing from the update path in `update_agent_template`.
+- **Fixed/Implemented in version:** **0.237.010**
+- **Config Version Updated:** `config.py` VERSION set to **0.237.010**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/functions_agent_templates.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Added max length constants for template fields and list items.
+ - Validated lengths during template updates.
+ - Bumped application version in config.py.
+- **Testing Approach:**
+ - Added a functional test to validate length validation wiring.
+
+## Validation
+- **Test Results:** functional_tests/test_agent_template_length_validation.py
+- **Before/After Comparison:**
+ - Before: Oversized template fields could be saved.
+ - After: Oversized fields raise a validation error before persistence.
+- **User Experience Improvements:**
+ - Consistent template validation and clearer error feedback.
diff --git a/docs/explanation/fixes/AI_MODELS_TAB_EMBEDDING_IMAGE_LOCATION_FIX.md b/docs/explanation/fixes/AI_MODELS_TAB_EMBEDDING_IMAGE_LOCATION_FIX.md
new file mode 100644
index 00000000..92a698d2
--- /dev/null
+++ b/docs/explanation/fixes/AI_MODELS_TAB_EMBEDDING_IMAGE_LOCATION_FIX.md
@@ -0,0 +1,34 @@
+# AI Models Tab Embedding/Image Location Fix (v0.236.014)
+
+## Issue Description
+Embeddings and image generation configuration were placed inside the legacy AI model modal, which made them harder to find and inconsistent with the expected AI Models tab layout.
+
+## Root Cause Analysis
+The embeddings and image generation cards were moved into the legacy modal during a refactor that consolidated legacy GPT settings into a modal, unintentionally relocating non-legacy sections.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.014**
+
+## Technical Details
+### Files Modified
+- application/single_app/templates/admin_settings.html
+- application/single_app/config.py
+
+### Code Changes Summary
+- Moved the embeddings and image generation cards back to the AI Models tab.
+- Kept GPT configuration in the legacy modal.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to assert that the embeddings and image generation sections are outside the legacy modal markup.
+
+### Impact Analysis
+- Restores expected layout for administrators.
+- Prevents settings from being hidden in the legacy modal.
+- Maintains legacy GPT configuration flow without impacting multi-endpoint UI.
+
+## Validation
+- Functional test: functional_tests/test_ai_models_tab_embedding_image_location.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.014**.
diff --git a/docs/explanation/fixes/AOAI_MODEL_DISCOVERY_EMPTY_LIST_FIX.md b/docs/explanation/fixes/AOAI_MODEL_DISCOVERY_EMPTY_LIST_FIX.md
new file mode 100644
index 00000000..afe070c5
--- /dev/null
+++ b/docs/explanation/fixes/AOAI_MODEL_DISCOVERY_EMPTY_LIST_FIX.md
@@ -0,0 +1,36 @@
+# AOAI Model Discovery Empty List Fix (v0.236.015)
+
+## Issue Description
+Azure OpenAI endpoint discovery in the multi-endpoint modal returned a successful connection but always displayed zero models.
+
+## Root Cause Analysis
+The `/api/models/fetch` endpoint only handled Azure AI Foundry and returned an empty list for Azure OpenAI providers. The modal payload also lacked resource group support required to query deployments via ARM.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.015**
+
+## Technical Details
+### Files Modified
+- application/single_app/templates/admin_settings.html
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/route_backend_models.py
+- application/single_app/config.py
+
+### Code Changes Summary
+- Added Resource Group input to the model endpoint modal for AOAI discovery.
+- Required subscription + resource group for AOAI payloads.
+- Implemented AOAI deployment listing via ARM in `/api/models/fetch` and `/api/models/test-connection`.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to validate AOAI discovery wiring and resource group handling.
+
+### Impact Analysis
+- AOAI model fetch now returns deployments for the selected endpoint.
+- Prevents misleading “success with zero models” results.
+
+## Validation
+- Functional test: functional_tests/test_model_endpoints_aoai_fetch_fix.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.015**.
diff --git a/docs/explanation/fixes/CHAT_MODEL_DESCRIPTION_TOOLTIP_FIX.md b/docs/explanation/fixes/CHAT_MODEL_DESCRIPTION_TOOLTIP_FIX.md
new file mode 100644
index 00000000..0e8d9443
--- /dev/null
+++ b/docs/explanation/fixes/CHAT_MODEL_DESCRIPTION_TOOLTIP_FIX.md
@@ -0,0 +1,31 @@
+# Chat Model Description Tooltip Fix (v0.236.023)
+
+## Issue Description
+Chat model options did not expose model descriptions on hover, making it harder for users to choose between similar models.
+
+## Root Cause Analysis
+The model select options did not include a tooltip title derived from the model description.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.023**
+
+## Technical Details
+### Files Modified
+- application/single_app/templates/chats.html
+- application/single_app/config.py
+
+### Code Changes Summary
+- Added a title attribute on multi-endpoint model options using the model description (or display name as fallback).
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to verify the tooltip title is present in the chat template.
+
+### Impact Analysis
+- Improves model selection clarity with descriptive hover text.
+
+## Validation
+- Functional test: functional_tests/test_chat_model_description_tooltip.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.023**.
diff --git a/docs/explanation/fixes/CHAT_STREAM_HEARTBEAT_REATTACH_FIX.md b/docs/explanation/fixes/CHAT_STREAM_HEARTBEAT_REATTACH_FIX.md
new file mode 100644
index 00000000..0b10f8bd
--- /dev/null
+++ b/docs/explanation/fixes/CHAT_STREAM_HEARTBEAT_REATTACH_FIX.md
@@ -0,0 +1,68 @@
+# Chat Stream Heartbeat Reattach Fix (Version 0.239.183)
+
+Fixed/Implemented in version: **0.239.183**
+
+## Issue Description
+
+Long-running chat streams could still go quiet long enough to hit Azure App Service idle timeouts, and navigating away from a conversation meant the browser lost the live stream even though the backend worker kept running.
+
+The original reconnect support also depended on in-process memory, so reconnecting from a different App Service or gunicorn worker could miss the active stream entirely.
+
+## Root Cause Analysis
+
+The streaming route sent useful SSE data only when a model token or explicit thought event was available. There was no transport-level heartbeat during long blocking phases such as tool execution or tabular analysis.
+
+The background worker also outlived the original HTTP consumer, but the live response stream was only attached to that first request. Reopening the conversation later loaded persisted messages only and had no way to replay or rejoin the in-flight response.
+
+The first reconnect registry stored replay state only in module globals, which made it best-effort inside a single worker process but not durable across multi-worker deployments.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/route_backend_chats.py`
+- `application/single_app/app_settings_cache.py`
+- `application/single_app/static/js/chat/chat-streaming.js`
+- `application/single_app/static/js/chat/chat-conversations.js`
+- `application/single_app/static/js/chat/chat-messages.js`
+- `application/single_app/config.py`
+- `functional_tests/test_chat_stream_background_execution.py`
+- `functional_tests/test_chat_stream_heartbeat_reattach.py`
+- `functional_tests/test_chat_stream_compatibility_sse_syntax.py`
+- `functional_tests/test_chat_stream_debug_logging.py`
+- `functional_tests/test_streaming_only_chat_path.py`
+- `functional_tests/test_streaming_thought_finalization.py`
+
+### Code Changes Summary
+
+- Added SSE heartbeat comments so active stream connections continue emitting bytes during long idle gaps.
+- Added a per-user, per-conversation active stream registry that stores in-flight SSE history for reconnecting consumers.
+- Added Redis-backed session metadata and event replay through `app_settings_cache.py`, with same-process in-memory fallback when Redis is disabled.
+- Added stream status and reattach endpoints so the frontend can reconnect to an active conversation stream.
+- Updated the chat client to remove the hardcoded five-minute timeout, wait for saved message reload, and then attempt reattachment for pending conversations.
+- Kept backend processing detached from the browser request so navigation away does not cancel server-side completion.
+- Bumped the application version to `0.239.183`.
+
+### Testing Approach
+
+- Added `functional_tests/test_chat_stream_heartbeat_reattach.py` to verify heartbeat emission, cache-backed session replay hooks, and frontend reattach hooks.
+- Updated existing streaming regression tests to reflect the new version and stream-session routing.
+
+## Validation
+
+### Before
+
+- Long blocking phases could leave the SSE connection silent for too long.
+- A user returning to the conversation only saw persisted messages after the stream fully completed.
+- The frontend had a separate five-minute timeout path that could interrupt the visible stream.
+
+### After
+
+- Active stream responses emit keep-alive comment frames while waiting for the next real SSE payload.
+- Reopening a conversation can reconnect to the still-running stream and replay its in-flight events.
+- Redis-enabled deployments can reattach across App Service or gunicorn workers because session metadata and event history are no longer limited to a single process.
+- The backend worker continues independently, and the frontend no longer imposes a hard five-minute timeout on the stream.
+
+### Impact Analysis
+
+This change improves resilience for long-running chat operations without changing the persisted message model. Completed replies still become the source of truth in Cosmos DB, while in-flight replies gain a durable reconnect path when Redis is enabled and still retain same-process fallback behavior when it is not.
\ No newline at end of file
diff --git a/docs/explanation/fixes/CHAT_TAGGING_ENDPOINT_VISIBILITY_AND_DARK_MODE_FIX.md b/docs/explanation/fixes/CHAT_TAGGING_ENDPOINT_VISIBILITY_AND_DARK_MODE_FIX.md
new file mode 100644
index 00000000..c37d533b
--- /dev/null
+++ b/docs/explanation/fixes/CHAT_TAGGING_ENDPOINT_VISIBILITY_AND_DARK_MODE_FIX.md
@@ -0,0 +1,71 @@
+# Chat Tagging Endpoint Visibility And Dark Mode Fix
+
+Fixed/Implemented in version: **0.239.167**
+
+## Issue Description
+
+Three related UI regressions needed correction:
+
+- Unsupported `new_foundry` model endpoints were still visible in user-facing multi-endpoint flows.
+- Personal conversations displayed visible tags, while group conversation labels were inconsistent between the active header and sidebar, and some group-agent conversations failed to surface a group tag.
+- The upload user agreement content became unreadable in dark mode because its light background styling bypassed the shared dark-mode override.
+
+## Root Cause Analysis
+
+- Frontend endpoint sanitization removed secrets but did not filter unsupported providers before rendering workspace, group, and chat endpoint views.
+- Endpoint saves merged only the visible payload, so hiding unsupported providers on read alone would have risked deleting previously stored hidden endpoints.
+- The streaming chat path saved conversation metadata without the selected agent details, which prevented group-agent conversations from consistently preserving primary group context.
+- The sidebar conversation renderer emitted visible `personal` badges and hard-coded `group` text instead of using the group name.
+- The active conversation header and details formatter shortened group labels when the full group name should have been shown in the primary chat view.
+- The upload agreement modal used an inline `background-color: var(--bs-light)` style, which bypassed the existing `[data-bs-theme="dark"] .bg-light` override.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/functions_settings.py`
+- `application/single_app/templates/_multiendpoint_modal.html`
+- `application/single_app/static/js/chat/chat-conversations.js`
+- `application/single_app/static/js/chat/chat-conversation-details.js`
+- `application/single_app/route_backend_chats.py`
+- `application/single_app/templates/base.html`
+- `application/single_app/config.py`
+- `functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py`
+- `ui_tests/test_model_endpoint_request_uses_endpoint_id.py`
+- `ui_tests/test_upload_agreement_dark_mode.py`
+
+### Code Changes Summary
+
+- Added shared provider visibility filtering so only `aoai` and `aifoundry` are exposed in frontend endpoint payloads.
+- Preserved stored hidden endpoints during merge/save flows so unsupported `new_foundry` entries remain in data even though they are no longer shown in the UI.
+- Removed the New Foundry option from the multi-endpoint provider selector and added APIM provider guidance under the field.
+- Updated conversation tag rendering so personal conversations show no visible badge, the active conversation header shows the full group name, and the sidebar shows a short group tag using the first 8 characters of the group name.
+- Updated the conversation details formatter to show the full group name.
+- Fixed the streaming chat path to pass selected agent metadata into conversation metadata collection so group-agent conversations retain group context.
+- Swapped the upload agreement content container to the shared `bg-light` class so the existing dark-mode override applies.
+
+### Testing Approach
+
+- Functional regression: `functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py`
+- UI regression: `ui_tests/test_model_endpoint_request_uses_endpoint_id.py`
+- UI regression: `ui_tests/test_upload_agreement_dark_mode.py`
+
+## Validation
+
+### Before
+
+- Users could still see unsupported New Foundry endpoint options in user-facing multi-endpoint workflows.
+- Personal conversations displayed visible `personal` badges in the sidebar.
+- Group conversations showed inconsistent labels between the active header and the sidebar.
+- Group-agent conversations could lose their group tag when saved through the streaming path.
+- The upload agreement content could render as light text on a light surface in dark mode.
+
+### After
+
+- Only Azure OpenAI and Foundry (classic) are exposed in the visible multi-endpoint provider UI.
+- Existing hidden unsupported endpoints are preserved in storage.
+- Personal conversations no longer render a visible tag.
+- The active conversation header and details view show the full group name.
+- Sidebar group conversations render a short badge using the first 8 characters of the group name.
+- Streaming group-agent conversations retain group metadata for tag display and related filtering.
+- The upload agreement content uses the shared dark-mode-safe light surface styling.
\ No newline at end of file
diff --git a/docs/explanation/fixes/CHAT_TOOLBAR_LAYOUT_FIX.md b/docs/explanation/fixes/CHAT_TOOLBAR_LAYOUT_FIX.md
new file mode 100644
index 00000000..fe8e384b
--- /dev/null
+++ b/docs/explanation/fixes/CHAT_TOOLBAR_LAYOUT_FIX.md
@@ -0,0 +1,45 @@
+# Chat Toolbar Layout Fix
+
+Fixed/Implemented in version: **0.239.170**
+
+## Issue Description
+
+The chat toolbar could either leave an empty band above the chat input or let prompt and agent/model selectors crowd each other when the chat pane became narrower.
+
+## Root Cause Analysis
+
+- The selector and toggle controls are separate sibling groups, but the actions cluster can wrap internally as the available pane width shrinks.
+- Without a medium-width breakpoint, that internal wrapping either created a blank band above the controls or forced the prompt and agent/model selectors to compete for the same row.
+- The layout needed different behavior for wide, medium, and mobile widths instead of a single desktop rule.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/static/css/chats.css`
+- `application/single_app/config.py`
+- `functional_tests/test_chat_toolbar_layout.py`
+
+### Code Changes Summary
+
+- Kept the selector and toggle controls as separate sibling groups.
+- Updated the wide-layout toolbar flex rules so the controls stay on one row and align to the bottom edge.
+- Added a medium-width breakpoint that promotes the actions and controls clusters to separate full-width rows before selector overlap can occur.
+- Kept the mobile breakpoint wrapped behavior so the toolbar can still stack on narrow screens.
+- Added a regression test to verify the sibling structure plus the wide and medium responsive layout hooks remain in place.
+
+### Testing Approach
+
+- Functional regression: `functional_tests/test_chat_toolbar_layout.py`
+
+## Validation
+
+### Before
+
+- The selector dropdowns and the voice toggle could drop awkwardly or crowd each other, depending on the pane width.
+
+### After
+
+- The selector dropdowns and the voice toggle remain as separate sibling groups.
+- On wide layouts they stay aligned on a single row.
+- On medium widths they switch to clean full-width rows before prompt and agent/model controls can overlap.
\ No newline at end of file
diff --git a/docs/explanation/fixes/CHAT_TYPE_NORMALIZATION_FIX.md b/docs/explanation/fixes/CHAT_TYPE_NORMALIZATION_FIX.md
new file mode 100644
index 00000000..035c7841
--- /dev/null
+++ b/docs/explanation/fixes/CHAT_TYPE_NORMALIZATION_FIX.md
@@ -0,0 +1,46 @@
+# Chat Type Normalization Fix (0.236.065)
+
+## Fix Title
+Chat Type Normalization for New and Personal Conversations
+
+## Issue Description
+New conversations and legacy personal conversations could lack a consistent `chat_type`, which made scope-aware UI logic inconsistent across agent dropdowns, badges, and metadata views.
+
+## Root Cause Analysis
+Chat type values were optional in conversation items. Missing or legacy values were not normalized, causing ambiguous scope handling and inconsistent UI state.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.065**
+
+## Technical Details
+### Files Modified
+- application/single_app/static/js/chat/chat-conversations.js
+- application/single_app/static/js/chat/chat-agents.js
+- application/single_app/static/js/chat/chat-retry.js
+- application/single_app/static/js/chat/chat-conversation-details.js
+- application/single_app/route_backend_conversations.py
+- application/single_app/route_backend_chats.py
+- application/single_app/functions_conversation_metadata.py
+- application/single_app/config.py
+- functional_tests/test_chat_type_normalization.py
+
+### Code Changes Summary
+- Normalize missing or legacy personal chat types to `personal_single_user`.
+- Mark new conversations with `chat_type = "new"` on creation.
+- Persist normalization server-side for legacy conversations.
+- Update chat detail rendering and badges to handle `personal_single_user` and `new`.
+
+## Testing Approach
+- Added a functional test to assert chat type normalization across UI and backend sources.
+
+## Validation
+### Test Results
+- Functional test: functional_tests/test_chat_type_normalization.py
+
+### User Experience Improvements
+- Consistent agent scope determination for new and personal chats.
+- Stable chat type metadata in conversation lists and details.
+
+## Related Updates
+- Config version updated to 0.236.065.
+- Functional test added for chat type normalization coverage.
diff --git a/docs/explanation/fixes/CONTROL_CENTER_DATE_LABELS_FIX.md b/docs/explanation/fixes/CONTROL_CENTER_DATE_LABELS_FIX.md
new file mode 100644
index 00000000..a7d1bf34
--- /dev/null
+++ b/docs/explanation/fixes/CONTROL_CENTER_DATE_LABELS_FIX.md
@@ -0,0 +1,27 @@
+# Control Center Date Labels Fix (Version 0.235.074)
+
+## Header Information
+- **Fix Title:** Control Center Date Labels Fix
+- **Issue Description:** Control Center charts displayed dates one day behind due to UTC parsing of date keys.
+- **Root Cause Analysis:** The frontend parsed YYYY-MM-DD strings with `new Date(...)`, which treats the value as UTC and shifts the day in local timezones.
+- **Fixed/Implemented in version:** **0.235.074**
+- **Config Version Updated:** `config.py` VERSION set to **0.235.074**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/static/js/control-center.js
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Added a local date parsing helper for YYYY-MM-DD keys.
+ - Updated chart label and tooltip rendering to use local date parsing.
+ - Bumped application version in config.py.
+- **Testing Approach:**
+ - Added a functional test to validate the date parsing helper is present in the chart logic.
+
+## Validation
+- **Test Results:** functional_tests/test_control_center_date_labels_fix.py
+- **Before/After Comparison:**
+ - Before: Date labels in charts appeared one day behind in local timezones.
+  - After: Date labels match the correct local date (e.g., a January 21 data point is labeled January 21 in the user's local timezone).
+- **User Experience Improvements:**
+ - Accurate daily labels across all activity charts.
diff --git a/docs/explanation/fixes/CUSTOM_ENDPOINT_FLAGS_WORKSPACE_MERGE_FIX.md b/docs/explanation/fixes/CUSTOM_ENDPOINT_FLAGS_WORKSPACE_MERGE_FIX.md
new file mode 100644
index 00000000..7639e607
--- /dev/null
+++ b/docs/explanation/fixes/CUSTOM_ENDPOINT_FLAGS_WORKSPACE_MERGE_FIX.md
@@ -0,0 +1,51 @@
+# Custom Endpoint Flags & Workspace Merge Fix (Version 0.236.058)
+
+## Header Information
+- Fix Title: Custom endpoint flag migration and workspace merge enforcement
+- Issue Description: Custom endpoint toggles only applied to agents, endpoints loaded despite being disabled, and workspace merges could include global + personal + group agents simultaneously.
+- Root Cause Analysis: Legacy settings names were still in use, backend routes did not consistently enforce the custom endpoint flags, and agent selection merged multiple scopes at once.
+- Version Implemented: 0.236.058
+- Fixed/Implemented in version: **0.236.058**
+- Config version updated in: application/single_app/config.py
+
+## Technical Details
+### Files Modified
+- application/single_app/functions_settings.py
+- application/single_app/route_backend_agents.py
+- application/single_app/route_backend_models.py
+- application/single_app/route_frontend_admin_settings.py
+- application/single_app/semantic_kernel_loader.py
+- application/single_app/static/js/agent_modal_stepper.js
+- application/single_app/static/js/workspace/group_agents.js
+- application/single_app/static/js/chat/chat-agents.js
+- application/single_app/static/js/chat/chat-retry.js
+- application/single_app/static/js/admin/admin_settings.js
+- application/single_app/templates/admin_settings.html
+- application/single_app/templates/workspace.html
+- application/single_app/templates/group_workspaces.html
+
+### Code Changes Summary
+- Added new custom endpoint flags (allow_user_custom_endpoints, allow_group_custom_endpoints) with migration from legacy names.
+- Enforced custom endpoint flags across user/group model endpoint routes and agent payload handling.
+- Scoped agent and model merges to global + workspace (personal or group) only.
+- Updated frontend toggles and visibility to respect the new settings.
+
+### Testing Approach
+- Added functional test to validate migration and legacy flag sync.
+
+### Impact Analysis
+- Prevents disabled endpoint settings from leaking into workspace UI and backend payloads.
+- Ensures group workspaces and personal workspaces only merge global agents with their own scope.
+- Maintains backward compatibility by migrating legacy settings.
+
+## Validation
+- Test Results: Functional test added for settings migration.
+- Before/After Comparison:
+ - Before: Endpoint settings could appear even when disabled; merges could include global + personal + group simultaneously.
+ - After: Endpoint visibility and persistence are gated by flags; merges are limited to one workspace scope.
+- User Experience Improvements:
+ - Clearer, consistent enforcement of custom endpoint permissions.
+ - Reduced confusion around which agents/models are available per workspace.
+
+## Related Tests
+- functional_tests/test_custom_endpoint_settings_migration.py
diff --git a/docs/explanation/fixes/DEFAULT_MODEL_DROPDOWN_FOUNDRY_MODELS_FIX.md b/docs/explanation/fixes/DEFAULT_MODEL_DROPDOWN_FOUNDRY_MODELS_FIX.md
new file mode 100644
index 00000000..4a82ea31
--- /dev/null
+++ b/docs/explanation/fixes/DEFAULT_MODEL_DROPDOWN_FOUNDRY_MODELS_FIX.md
@@ -0,0 +1,33 @@
+# Default Model Dropdown Foundry Models Fix (Version 0.236.057)
+
+## Issue Description
+The admin multi-endpoint default model dropdown did not list Azure AI Foundry models when model IDs or enabled flags were missing in stored endpoint data. This caused the dropdown to only show Azure OpenAI entries.
+
+## Root Cause Analysis
+Endpoint data could be missing model IDs and enabled flags, and those values were not normalized at the data layer. Missing identifiers made Foundry models appear unavailable in the dropdown.
+
+## Fix Summary
+- Normalize model endpoints on the backend so IDs and enabled flags are consistently present.
+- Use stable fallback identifiers derived from deployment names when IDs are missing.
+
+## Technical Details
+- Files modified:
+ - application/single_app/functions_settings.py
+ - application/single_app/route_frontend_admin_settings.py
+ - application/single_app/route_backend_models.py
+ - application/single_app/route_backend_chats.py
+ - application/single_app/config.py
+- Added backend normalization so endpoints/models always include stable IDs and enabled flags.
+- Updated multi-endpoint resolution to match by deployment/model names when IDs are missing.
+
+## Testing
+- Functional test: functional_tests/test_model_endpoint_normalization_backend.py
+
+## Impact Analysis
+- Admins can now see Foundry deployments in the default model dropdown even when IDs were previously missing.
+- Existing endpoint data remains compatible and is normalized in-memory before rendering.
+
+## Fixed/Implemented in version: **0.236.057**
+
+## Config Version Update
+- Updated VERSION in application/single_app/config.py to 0.236.057.
diff --git a/docs/explanation/fixes/DEFAULT_MODEL_SELECTION_FALLBACK_FIX.md b/docs/explanation/fixes/DEFAULT_MODEL_SELECTION_FALLBACK_FIX.md
new file mode 100644
index 00000000..20b25e70
--- /dev/null
+++ b/docs/explanation/fixes/DEFAULT_MODEL_SELECTION_FALLBACK_FIX.md
@@ -0,0 +1,38 @@
+# Default Model Selection Fallback Fix (Version 0.236.053)
+
+## Overview
+Adds an admin-configurable default model selection for multi-endpoint configurations and uses it as a fallback when agent requests omit model information.
+
+## Issue Description
+Agent-based requests can omit `model_id` and `model_endpoint_id`, which caused GPT initialization to fail when multiple deployments were configured. Without a default model, summarization and fallback logic could not resolve a usable GPT client.
+
+## Root Cause Analysis
+Multi-endpoint GPT resolution requires `model_id` and `model_endpoint_id`. Agent requests do not always send those fields, and there was no default model configured to bridge the gap.
+
+## Fix Summary
+- Added a default model selection to admin settings for multi-endpoint mode.
+- Persisted default model selection in settings and validated it against configured endpoints/models.
+- Added fallback GPT initialization using the default selection for agent requests without model info.
+- Supports both Azure OpenAI and Azure AI Foundry providers.
+
+## Files Modified
+- application/single_app/functions_settings.py
+- application/single_app/templates/admin_settings.html
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/route_frontend_admin_settings.py
+- application/single_app/route_backend_chats.py
+- application/single_app/config.py
+- functional_tests/test_default_model_selection_fallback.py
+
+## Testing
+- Added functional test: `test_default_model_selection_fallback.py`.
+
+## Validation
+- Verified admin UI renders the default model selector and stores the selection.
+- Verified chat GPT initialization can resolve default model when agent requests omit model info.
+
+## Version
+Fixed/Implemented in version: **0.236.053**
+
+## Config Version Reference
+`config.py` updated to `VERSION = "0.236.053"`.
diff --git a/docs/explanation/fixes/DOCUMENT_UPLOAD_TRACEBACK_SHADOW_FIX.md b/docs/explanation/fixes/DOCUMENT_UPLOAD_TRACEBACK_SHADOW_FIX.md
new file mode 100644
index 00000000..e7d41388
--- /dev/null
+++ b/docs/explanation/fixes/DOCUMENT_UPLOAD_TRACEBACK_SHADOW_FIX.md
@@ -0,0 +1,53 @@
+# Document Upload Traceback Shadow Fix
+
+Fixed/Implemented in version: **0.239.165**
+
+## Issue Description
+
+Uploading PDF and DOCX documents could fail during processing with this message:
+
+`Processing failed: cannot access local variable 'traceback' where it is not associated with a value`
+
+The failure happened while the document ingestion flow was handling an earlier exception, so the user saw the traceback-scoping error instead of the original upload problem.
+
+## Root Cause Analysis
+
+- `process_di_document(...)` used `traceback.format_exc()` in one exception path.
+- The same function also had a later branch with `import traceback` inside the function body.
+- In Python, that function-local import makes `traceback` a local variable for the entire function scope.
+- When an earlier exception path tried to call `traceback.format_exc()` before the local import executed, Python raised an `UnboundLocalError`.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/functions_documents.py`
+- `application/single_app/config.py`
+- `functional_tests/test_document_upload_traceback_shadow_fix.py`
+
+### Code Changes Summary
+
+- Added a module-level `import traceback` in `functions_documents.py`.
+- Removed function-local `import traceback` statements so exception handlers all use the same module-scoped import.
+- Bumped the application version to `0.239.165`.
+- Added a regression test that inspects the AST for `process_di_document(...)` and verifies the fix remains in place.
+
+### Testing Approach
+
+- Added `functional_tests/test_document_upload_traceback_shadow_fix.py` to verify:
+  - `functions_documents.py` imports `traceback` at module scope.
+  - `process_di_document(...)` does not locally import `traceback`.
+  - `process_di_document(...)` still uses `traceback.format_exc()` for diagnostics.
+
+## Validation
+
+### Before
+
+- PDF and DOCX uploads could fail with an unbound local error while handling a processing exception.
+- The error masked the original processing failure and made upload debugging harder.
+
+### After
+
+- The upload pipeline uses a consistent module-scoped `traceback` import.
+- PDF and DOCX processing no longer trips the unbound local error caused by traceback shadowing.
+- Any real underlying processing error now surfaces through the intended exception handling path.
\ No newline at end of file
diff --git a/docs/explanation/fixes/ENDPOINTS_TAB_ORDER_VISIBILITY_FIX.md b/docs/explanation/fixes/ENDPOINTS_TAB_ORDER_VISIBILITY_FIX.md
new file mode 100644
index 00000000..893df6f9
--- /dev/null
+++ b/docs/explanation/fixes/ENDPOINTS_TAB_ORDER_VISIBILITY_FIX.md
@@ -0,0 +1,36 @@
+# Endpoints Tab Order & Visibility Fix (v0.236.046)
+
+## Issue Description
+Endpoints tabs appeared before Actions and were still visible even when admin settings disallowed custom endpoints.
+
+## Root Cause Analysis
+The tab ordering placed endpoints ahead of actions, and the endpoints UI was only gated by general agent settings instead of the custom endpoint permission flags.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.046**
+
+## Technical Details
+### Files Modified
+- application/single_app/templates/workspace.html
+- application/single_app/templates/group_workspaces.html
+- application/single_app/config.py
+- functional_tests/test_endpoints_tab_order_visibility.py
+
+### Code Changes Summary
+- Reordered tabs to follow Documents → Prompts → Agents → Actions → Endpoints.
+- Gated endpoints tabs and panes behind admin custom endpoint settings.
+- Added a functional test to verify tab ordering and gating.
+- Incremented the application version.
+
+### Testing Approach
+- Functional test validates the tab order and permission gating in both templates.
+
+## Impact Analysis
+- Endpoints UI is fully hidden unless admins allow custom endpoints.
+- Tab order matches the requested workflow order.
+
+## Validation
+- Functional test: functional_tests/test_endpoints_tab_order_visibility.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.046**.
diff --git a/docs/explanation/fixes/FOUNDARY_DEPLOYMENT_DISABLED_FILTER_FIX.md b/docs/explanation/fixes/FOUNDARY_DEPLOYMENT_DISABLED_FILTER_FIX.md
new file mode 100644
index 00000000..265b4fe6
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDARY_DEPLOYMENT_DISABLED_FILTER_FIX.md
@@ -0,0 +1,34 @@
+# Foundry Deployment Disabled Filter Fix (v0.236.025)
+
+## Issue Description
+Model discovery included deployments that were disabled, causing unavailable models to appear in the UI.
+
+## Root Cause Analysis
+The deployment list responses were not filtered by provisioning state.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.025**
+
+## Technical Details
+### Files Modified
+- application/single_app/route_backend_models.py
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/config.py
+
+### Code Changes Summary
+- Added deployment provisioning state filtering (exclude non-succeeded deployments).
+- Switched Foundry management discovery to deployments list.
+- Updated Foundry validation to require resource group.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test for deployment state filtering.
+
+### Impact Analysis
+- Prevents disabled deployments from appearing in model selection.
+
+## Validation
+- Functional test: functional_tests/test_foundry_deployment_disabled_filter.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.025**.
diff --git a/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_RESOLUTION_FIX.md b/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_RESOLUTION_FIX.md
new file mode 100644
index 00000000..d7503255
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_RESOLUTION_FIX.md
@@ -0,0 +1,31 @@
+# Foundry Agent Endpoint Resolution Fix (Version 0.236.051)
+
+## Overview
+Ensures Azure AI Foundry agents resolve project-scoped endpoints reliably by enriching agent configuration with the Foundry project name and endpoint ID during kernel load.
+
+## Issue Description
+Foundry agent chat invocation failed for agents configured with a Foundry endpoint that required a project name. The runtime endpoint resolution did not receive the project name, so the `/api/projects/{project_name}` segment was not appended, resulting in invalid requests.
+
+## Root Cause Analysis
+`resolve_agent_config` enriched Foundry settings with the endpoint URL and API version, but did not propagate `project_name` or `endpoint_id` from the selected Foundry endpoint configuration. Agents created earlier could also lack `model_endpoint_id`, leaving only `other_settings.azure_ai_foundry.endpoint_id` for resolution.
+
+## Fix Summary
+- Enrich Foundry settings with `project_name` and `endpoint_id` from the selected endpoint configuration.
+- Add endpoint ID fallback to resolve Foundry endpoint config when `model_endpoint_id` is missing.
+
+## Files Modified
+- application/single_app/semantic_kernel_loader.py
+- application/single_app/config.py
+- functional_tests/test_foundry_agent_endpoint_resolution.py
+
+## Testing
+- Added functional test: `test_foundry_agent_endpoint_resolution.py`.
+
+## Validation
+- Verified Foundry settings enrichment includes `project_name` and endpoint ID fallback logic.
+
+## Version
+Fixed/Implemented in version: **0.236.051**
+
+## Config Version Reference
+`config.py` updated to `VERSION = "0.236.051"`.
diff --git a/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_VALIDATION_FIX.md b/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_VALIDATION_FIX.md
new file mode 100644
index 00000000..fa3fdf88
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_AGENT_ENDPOINT_VALIDATION_FIX.md
@@ -0,0 +1,36 @@
+# Foundry Agent Endpoint Validation Fix (Version 0.236.060)
+
+## Header Information
+- Fix Title: Prevent Foundry agent invocation without configured endpoint
+- Issue Description: Foundry agents could be invoked without an endpoint resolved, resulting in runtime failures during agent execution.
+- Root Cause Analysis: Foundry agents were registered even when endpoint configuration was missing, leaving runtime resolution to fail.
+- Version Implemented: 0.236.060
+- Fixed/Implemented in version: **0.236.060**
+- Config version updated in: application/single_app/config.py
+
+## Technical Details
+### Files Modified
+- application/single_app/semantic_kernel_loader.py
+- application/single_app/config.py
+
+### Code Changes Summary
+- Added a helper to resolve Foundry endpoints using agent settings, global settings, and environment fallback.
+- Prevented Foundry agent registration when no endpoint is available, falling back to kernel-only mode.
+
+### Testing Approach
+- Added functional test covering Foundry endpoint resolution priority and fallback.
+
+### Impact Analysis
+- Avoids runtime errors when Foundry agent endpoint configuration is missing.
+- Improves clarity by keeping kernel-only mode when configuration is incomplete.
+
+## Validation
+- Test Results: Functional test added for endpoint resolution logic.
+- Before/After Comparison:
+ - Before: Foundry agent invocation failed at runtime with missing endpoint.
+ - After: Foundry agent registration is skipped and kernel-only mode is used when endpoint is missing.
+- User Experience Improvements:
+ - Clearer fallback behavior and fewer runtime errors for misconfigured Foundry agents.
+
+## Related Tests
+- functional_tests/test_foundry_endpoint_resolution.py
diff --git a/docs/explanation/fixes/FOUNDRY_AGENT_LIST_ASYNC_PAGING_FIX.md b/docs/explanation/fixes/FOUNDRY_AGENT_LIST_ASYNC_PAGING_FIX.md
new file mode 100644
index 00000000..dbb35de9
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_AGENT_LIST_ASYNC_PAGING_FIX.md
@@ -0,0 +1,33 @@
+# Foundry Agent List Async Paging Fix (v0.236.047)
+
+## Issue Description
+Fetching Azure AI Foundry agents failed with "object AsyncItemPaged can't be used in 'await' expression".
+
+## Root Cause Analysis
+The async list operation returns an `AsyncItemPaged` sequence, but the code awaited it directly instead of iterating over the async iterator.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.047**
+
+## Technical Details
+### Files Modified
+- application/single_app/foundry_agent_runtime.py
+- application/single_app/config.py
+- functional_tests/test_foundry_agent_list_async_paging.py
+
+### Code Changes Summary
+- Stopped awaiting the list call and added async iteration for paged results.
+- Kept support for list/dict responses.
+- Incremented the application version.
+
+### Testing Approach
+- Functional test checks for async iteration and non-awaited list calls.
+
+## Impact Analysis
+- Foundry agent discovery now works reliably across paged responses.
+
+## Validation
+- Functional test: functional_tests/test_foundry_agent_list_async_paging.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.047**.
diff --git a/docs/explanation/fixes/FOUNDRY_AGENT_LIST_PROJECT_ENDPOINT_FIX.md b/docs/explanation/fixes/FOUNDRY_AGENT_LIST_PROJECT_ENDPOINT_FIX.md
new file mode 100644
index 00000000..b3743b43
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_AGENT_LIST_PROJECT_ENDPOINT_FIX.md
@@ -0,0 +1,34 @@
+# Foundry Agent List Project Endpoint Fix (v0.236.048)
+
+## Issue Description
+Listing Azure AI Foundry agents returned a 404 Resource not found error for endpoints configured without the project path.
+
+## Root Cause Analysis
+Agent listing used the base endpoint and omitted the required `/api/projects/{project_name}` segment when a project name was configured.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.048**
+
+## Technical Details
+### Files Modified
+- application/single_app/foundry_agent_runtime.py
+- application/single_app/route_backend_models.py
+- application/single_app/config.py
+- functional_tests/test_foundry_agent_list_project_endpoint.py
+
+### Code Changes Summary
+- Added project name to Foundry settings.
+- Appended `/api/projects/{project_name}` when missing.
+- Incremented the application version.
+
+### Testing Approach
+- Functional test checks for project name wiring and endpoint normalization.
+
+## Impact Analysis
+- Foundry agent listing works for project-scoped endpoints.
+
+## Validation
+- Functional test: functional_tests/test_foundry_agent_list_project_endpoint.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.048**.
diff --git a/docs/explanation/fixes/FOUNDRY_CHAT_INFERENCE_SCOPE_FIX.md b/docs/explanation/fixes/FOUNDRY_CHAT_INFERENCE_SCOPE_FIX.md
new file mode 100644
index 00000000..e253a6d2
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_CHAT_INFERENCE_SCOPE_FIX.md
@@ -0,0 +1,35 @@
+# Foundry Chat Inference Scope Fix (v0.236.030)
+
+## Header Information
+- **Fix Title:** Foundry chat inference uses cloud-aware scopes
+- **Issue Description:** Multi-endpoint Foundry inference in chat returned 401 Unauthorized due to incorrect token audience.
+- **Root Cause Analysis:** Chat route always used the Cognitive Services scope and did not apply the Foundry scope per cloud.
+- **Version Implemented:** 0.236.030
+
+## Fixed/Implemented in version: **0.236.030**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/route_backend_chats.py
+ - functional_tests/test_foundry_chat_scope_resolution.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Added Foundry scope resolution for multi-endpoint chat inference.
+ - Applied service principal cloud tags and endpoint-based scope inference for managed identity.
+ - Added a functional test validating scope resolution.
+ - Incremented the application version to 0.236.030.
+- **Testing Approach:**
+ - Functional test checks for cloud-aware scope logic in chat routes.
+- **Impact Analysis:**
+ - Foundry chat inference now requests tokens with the correct audience per cloud.
+
+## Validation
+- **Test Results:** Functional test added (see functional_tests/test_foundry_chat_scope_resolution.py).
+- **Before/After Comparison:**
+ - **Before:** Foundry chat inference always used Cognitive Services scope.
+ - **After:** Foundry chat inference derives scope based on cloud and endpoint.
+- **User Experience Improvements:**
+ - Foundry model inference works in multi-endpoint chat without unauthorized errors.
+
+## References
+- Config version updated in application/single_app/config.py to 0.236.030.
diff --git a/docs/explanation/fixes/FOUNDRY_INFERENCE_SCOPE_FIX.md b/docs/explanation/fixes/FOUNDRY_INFERENCE_SCOPE_FIX.md
new file mode 100644
index 00000000..47e4d16c
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_INFERENCE_SCOPE_FIX.md
@@ -0,0 +1,34 @@
+# Foundry Inference Scope Fix (v0.236.027)
+
+## Header Information
+- **Fix Title:** Foundry inference token scope uses AI Foundry audience
+- **Issue Description:** Testing Foundry models returned 401 Unauthorized due to an incorrect token audience.
+- **Root Cause Analysis:** The inference token used the Cognitive Services scope instead of the AI Foundry scope for Foundry project endpoints.
+- **Version Implemented:** 0.236.027
+
+## Fixed/Implemented in version: **0.236.027**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/route_backend_models.py
+ - application/single_app/config.py
+ - functional_tests/test_foundry_inference_scope_fix.py
+- **Code Changes Summary:**
+ - Updated inference client to use the AI Foundry token scope for Foundry endpoints.
+ - Added a functional test to assert provider-aware scope selection.
+ - Incremented the application version to 0.236.027.
+- **Testing Approach:**
+ - Functional test validates the Foundry-specific scope is present in the inference path.
+- **Impact Analysis:**
+ - Foundry model tests authenticate successfully when using managed identity or service principal.
+
+## Validation
+- **Test Results:** Functional test added (see functional_tests/test_foundry_inference_scope_fix.py).
+- **Before/After Comparison:**
+ - **Before:** Foundry model test used Cognitive Services token scope.
+ - **After:** Foundry model test uses AI Foundry token scope.
+- **User Experience Improvements:**
+ - Admins can test Foundry model connections without unauthorized errors.
+
+## References
+- Config version updated in application/single_app/config.py to 0.236.027.
diff --git a/docs/explanation/fixes/FOUNDRY_MANAGEMENT_FIELDS_CLEANUP_FIX.md b/docs/explanation/fixes/FOUNDRY_MANAGEMENT_FIELDS_CLEANUP_FIX.md
new file mode 100644
index 00000000..d2c52547
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_MANAGEMENT_FIELDS_CLEANUP_FIX.md
@@ -0,0 +1,37 @@
+# Foundry Management Fields Cleanup Fix (v0.236.029)
+
+## Header Information
+- **Fix Title:** Remove management fields from Foundry endpoint configuration
+- **Issue Description:** Foundry endpoint configuration still referenced subscription/resource group/location fields after moving to project-scoped discovery.
+- **Root Cause Analysis:** The modal payload and debug logging retained AOAI management metadata for Foundry endpoints.
+- **Version Implemented:** 0.236.029
+
+## Fixed/Implemented in version: **0.236.029**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/static/js/admin/admin_model_endpoints.js
+ - application/single_app/route_backend_models.py
+ - functional_tests/test_foundry_management_fields_cleanup.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Removed location field usage from the endpoint modal script.
+ - Ensured management metadata is only included for Azure OpenAI payloads.
+ - Cleaned backend debug logging to remove location references.
+ - Added a functional test for Foundry management field cleanup.
+ - Incremented the application version to 0.236.029.
+- **Testing Approach:**
+ - Functional test validates the modal payload uses management fields only for AOAI.
+- **Impact Analysis:**
+ - Foundry endpoint configuration is now project-only without AOAI management metadata.
+
+## Validation
+- **Test Results:** Functional test added (see functional_tests/test_foundry_management_fields_cleanup.py).
+- **Before/After Comparison:**
+ - **Before:** Foundry payload carried AOAI management fields and location debug logs.
+ - **After:** Foundry payload excludes management fields and location is removed.
+- **User Experience Improvements:**
+ - Admins no longer see or save unnecessary management fields for Foundry endpoints.
+
+## References
+- Config version updated in application/single_app/config.py to 0.236.029.
diff --git a/docs/explanation/fixes/FOUNDRY_MODEL_LIST_ENDPOINT_FALLBACK_FIX.md b/docs/explanation/fixes/FOUNDRY_MODEL_LIST_ENDPOINT_FALLBACK_FIX.md
new file mode 100644
index 00000000..c19d99a2
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_MODEL_LIST_ENDPOINT_FALLBACK_FIX.md
@@ -0,0 +1,32 @@
+# Foundry Model List Endpoint Fallback Fix (v0.236.024)
+
+## Issue Description
+Model discovery for Azure AI Foundry returned a 400 error against `/openai/v1/models` for certain Foundry endpoints.
+
+## Root Cause Analysis
+Some Foundry endpoints expect the Azure OpenAI data-plane path `/openai/models` rather than `/openai/v1/models`.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.024**
+
+## Technical Details
+### Files Modified
+- application/single_app/route_backend_models.py
+- application/single_app/config.py
+
+### Code Changes Summary
+- Attempt `/openai/models` first, fall back to `/openai/v1/models`.
+- Added debug output for the attempted URLs.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to validate the fallback order.
+
+### Impact Analysis
+- Reduces 400 errors when listing models from Foundry endpoints.
+
+## Validation
+- Functional test: functional_tests/test_foundry_model_list_fallback.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.024**.
diff --git a/docs/explanation/fixes/FOUNDRY_PROJECT_DEPLOYMENTS_FETCH_FIX.md b/docs/explanation/fixes/FOUNDRY_PROJECT_DEPLOYMENTS_FETCH_FIX.md
new file mode 100644
index 00000000..8e5d9f68
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_PROJECT_DEPLOYMENTS_FETCH_FIX.md
@@ -0,0 +1,37 @@
+# Foundry Project Deployments Fetch Fix (v0.236.026)
+
+## Header Information
+- **Fix Title:** Foundry project deployments fetch uses project-scoped API
+- **Issue Description:** Foundry model discovery was pulling from account-level or data-plane lists, which did not align with project-scoped deployments shown in Foundry projects.
+- **Root Cause Analysis:** The discovery logic relied on generic model list fallbacks and management-plane deployments, instead of the project deployments list endpoint.
+- **Version Implemented:** 0.236.026
+
+## Fixed/Implemented in version: **0.236.026**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/route_backend_models.py
+ - application/single_app/static/js/admin/admin_model_endpoints.js
+ - functional_tests/test_foundry_model_list_fallback.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Switched Foundry discovery to the project deployments list endpoint using the project API scope.
+ - Removed subscription/resource group requirement for Foundry project discovery in the admin modal validation.
+ - Updated the functional test to validate project deployments discovery.
+ - Incremented the application version to 0.236.026.
+- **Testing Approach:**
+ - Updated functional test validates project deployments discovery helper and scope usage.
+- **Impact Analysis:**
+ - Foundry model discovery now reflects project-scoped deployments.
+ - Admin configuration flow no longer blocks on subscription/resource group for Foundry discovery.
+
+## Validation
+- **Test Results:** Functional test updated (see functional_tests/test_foundry_model_list_fallback.py).
+- **Before/After Comparison:**
+ - **Before:** Foundry model discovery used account-level or data-plane model listings.
+ - **After:** Foundry model discovery uses the project deployments list endpoint.
+- **User Experience Improvements:**
+ - Admins see the same project deployments as the Foundry project UI.
+
+## References
+- Config version updated in application/single_app/config.py to 0.236.026.
diff --git a/docs/explanation/fixes/FOUNDRY_SCOPE_BY_CLOUD_FIX.md b/docs/explanation/fixes/FOUNDRY_SCOPE_BY_CLOUD_FIX.md
new file mode 100644
index 00000000..b9cd9147
--- /dev/null
+++ b/docs/explanation/fixes/FOUNDRY_SCOPE_BY_CLOUD_FIX.md
@@ -0,0 +1,38 @@
+# Foundry Scope by Cloud Fix (v0.236.028)
+
+## Header Information
+- **Fix Title:** Foundry scope derived from cloud configuration
+- **Issue Description:** Foundry tokens used a hardcoded audience and did not support government or custom cloud scopes.
+- **Root Cause Analysis:** Scope selection was fixed to the public cloud audience and lacked a custom scope override.
+- **Version Implemented:** 0.236.028
+
+## Fixed/Implemented in version: **0.236.028**
+
+## Technical Details
+- **Files Modified:**
+ - application/single_app/route_backend_models.py
+ - application/single_app/static/js/admin/admin_model_endpoints.js
+ - application/single_app/templates/admin_settings.html
+ - functional_tests/test_foundry_inference_scope_fix.py
+ - application/single_app/config.py
+- **Code Changes Summary:**
+ - Added Foundry scope resolver that maps to public/government audiences and supports a custom scope value.
+ - Added a Foundry scope input for custom cloud service principal configurations.
+ - Hid Azure OpenAI subscription/resource group fields for Foundry endpoints.
+ - Updated the functional test to validate cloud-specific scope handling.
+ - Incremented the application version to 0.236.028.
+- **Testing Approach:**
+ - Functional test validates presence of cloud-specific and custom scope logic.
+- **Impact Analysis:**
+ - Foundry model discovery and inference now use the correct audience for public, government, and custom clouds.
+
+## Validation
+- **Test Results:** Functional test updated (see functional_tests/test_foundry_inference_scope_fix.py).
+- **Before/After Comparison:**
+ - **Before:** Foundry scope was hardcoded to public cloud.
+ - **After:** Foundry scope is derived from cloud configuration with custom override support.
+- **User Experience Improvements:**
+ - Admins can configure Foundry endpoints across supported clouds without manual code changes.
+
+## References
+- Config version updated in application/single_app/config.py to 0.236.028.
diff --git a/docs/explanation/fixes/GROUP_AGENT_SELECTION_ACTIVE_GROUP_FIX.md b/docs/explanation/fixes/GROUP_AGENT_SELECTION_ACTIVE_GROUP_FIX.md
new file mode 100644
index 00000000..d27ab507
--- /dev/null
+++ b/docs/explanation/fixes/GROUP_AGENT_SELECTION_ACTIVE_GROUP_FIX.md
@@ -0,0 +1,38 @@
+# Group Agent Selection Active Group Fix (v0.236.059)
+
+## Header Information
+- Fix Title: Enforce active group when resolving requested agents
+- Issue Description: Group agent requests were resolving to the fallback researcher agent because the loader matched only personal/global agents after request overrides.
+- Root Cause Analysis: Request agent overrides were stored as names only, which caused group scope to be lost; selection logic also ignored group scope when matching candidates.
+- Version Implemented: 0.236.059
+- Fixed/Implemented in version: **0.236.059**
+- Config version updated in: application/single_app/config.py
+
+## Technical Details
+### Files Modified
+- application/single_app/route_backend_chats.py
+- application/single_app/semantic_kernel_loader.py
+- application/single_app/config.py
+
+### Code Changes Summary
+- Persisted full request agent metadata in request context for scope-aware selection.
+- Enforced group_id presence and active-group matching before loading group agents.
+- Added scope-aware agent matching to prevent fallback to personal/global agents with the same name.
+
+### Testing Approach
+- Added functional test to validate scope-aware agent matching.
+
+### Impact Analysis
+- Group agent selection respects the active group and rejects mismatched group requests.
+- Prevents falling back to researcher when the requested group agent is missing or out of scope.
+
+## Validation
+- Test Results: Functional test added for scope-aware selection.
+- Before/After Comparison:
+ - Before: Group agent request often fell back to global/personal researcher.
+ - After: Group agent selection only succeeds when group_id matches the active group; otherwise kernel loads core plugins only.
+- User Experience Improvements:
+ - Correct agent selection for group workspaces with explicit scope enforcement.
+
+## Related Tests
+- functional_tests/test_group_agent_selection_scope.py
diff --git a/docs/explanation/fixes/MODEL_ENDPOINT_MIGRATION_AUTH_FIX.md b/docs/explanation/fixes/MODEL_ENDPOINT_MIGRATION_AUTH_FIX.md
new file mode 100644
index 00000000..7a83c87f
--- /dev/null
+++ b/docs/explanation/fixes/MODEL_ENDPOINT_MIGRATION_AUTH_FIX.md
@@ -0,0 +1,34 @@
+# Model Endpoint Migration Auth Fix (v0.236.016)
+
+## Issue Description
+Automatic migration to multi-endpoint settings did not preserve legacy authentication type, API key, subscription ID, or resource group, resulting in incomplete endpoint configurations.
+
+## Root Cause Analysis
+The migration logic only populated `auth.type` with the legacy `azure_openai_gpt_authentication_type` value, which uses `key` instead of the new `api_key` type. It also omitted management metadata needed for AOAI discovery.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.016**
+
+## Technical Details
+### Files Modified
+- application/single_app/route_frontend_admin_settings.py
+- application/single_app/config.py
+
+### Code Changes Summary
+- Mapped legacy `key` auth type to `api_key` during migration.
+- Carried forward `azure_openai_gpt_key` for API key auth.
+- Added `subscription_id` and `resource_group` to migrated endpoint management fields.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to verify migration wiring preserves auth and management fields.
+
+### Impact Analysis
+- Ensures migrated multi-endpoint configurations are complete and usable.
+- Prevents broken model discovery after migration.
+
+## Validation
+- Functional test: functional_tests/test_multi_endpoint_migration_auth_preserved.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.016**.
diff --git a/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_BUTTON_FIX.md b/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_BUTTON_FIX.md
new file mode 100644
index 00000000..e7c16c65
--- /dev/null
+++ b/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_BUTTON_FIX.md
@@ -0,0 +1,32 @@
+# Model Endpoint Save Button Fix (v0.236.021)
+
+## Issue Description
+Clicking the Save Endpoint button in the model endpoint modal did not trigger any visible action.
+
+## Root Cause Analysis
+The click handler did not guard against errors, and failures in the save flow were not surfaced to the user.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.021**
+
+## Technical Details
+### Files Modified
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/config.py
+
+### Code Changes Summary
+- Wrapped `saveEndpoint()` in a try/catch with toast error handling.
+- Ensured Save button prevents default behavior and always invokes the handler.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to confirm Save button wiring and error handling exists.
+
+### Impact Analysis
+- Save Endpoint now reliably triggers and surfaces errors.
+
+## Validation
+- Functional test: functional_tests/test_model_endpoints_save_button.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.021**.
diff --git a/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_TOAST_REMINDER_FIX.md b/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_TOAST_REMINDER_FIX.md
new file mode 100644
index 00000000..0201f8f8
--- /dev/null
+++ b/docs/explanation/fixes/MODEL_ENDPOINT_SAVE_TOAST_REMINDER_FIX.md
@@ -0,0 +1,31 @@
+# Model Endpoint Save Toast Reminder Fix (v0.236.022)
+
+## Issue Description
+The endpoint modal displayed a success toast after saving a model endpoint, which led admins to assume settings were persisted without clicking the main Save Settings button.
+
+## Root Cause Analysis
+The toast message did not clarify that modal changes are staged and require saving the admin settings form.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.022**
+
+## Technical Details
+### Files Modified
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/config.py
+
+### Code Changes Summary
+- Updated the success toast to remind users to save settings to persist changes.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to confirm the updated toast message.
+
+### Impact Analysis
+- Reduces accidental data loss when navigating away without saving.
+
+## Validation
+- Functional test: functional_tests/test_model_endpoints_save_toast_message.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.022**.
diff --git a/docs/explanation/fixes/MODEL_ENDPOINT_TEST_BUTTON_FIX.md b/docs/explanation/fixes/MODEL_ENDPOINT_TEST_BUTTON_FIX.md
new file mode 100644
index 00000000..9f9b34dc
--- /dev/null
+++ b/docs/explanation/fixes/MODEL_ENDPOINT_TEST_BUTTON_FIX.md
@@ -0,0 +1,31 @@
+# Model Endpoint Test Button Fix (v0.236.020)
+
+## Issue Description
+The per-model Test Connection button did not trigger a test because the payload builder referenced `authType` before it was defined, which caused a runtime error.
+
+## Root Cause Analysis
+`buildEndpointPayload()` used `authType` in validation checks before declaring it, resulting in a `ReferenceError` and preventing any request from being sent.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.020**
+
+## Technical Details
+### Files Modified
+- application/single_app/static/js/admin/admin_model_endpoints.js
+- application/single_app/config.py
+
+### Code Changes Summary
+- Defined `authType` before provider-specific validation checks in `buildEndpointPayload()`.
+- Incremented the application version.
+
+### Testing Approach
+- Added a functional test to verify `authType` is declared before validation checks.
+
+### Impact Analysis
+- Per-model Test Connection now triggers correctly.
+
+## Validation
+- Functional test: functional_tests/test_model_endpoint_payload_auth_type_order.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.020**.
diff --git a/docs/explanation/fixes/MSGRAPH_INCREMENTAL_CONSENT_FLOW_FIX.md b/docs/explanation/fixes/MSGRAPH_INCREMENTAL_CONSENT_FLOW_FIX.md
new file mode 100644
index 00000000..5a859125
--- /dev/null
+++ b/docs/explanation/fixes/MSGRAPH_INCREMENTAL_CONSENT_FLOW_FIX.md
@@ -0,0 +1,57 @@
+# Microsoft Graph Incremental Consent Flow Fix
+
+## Microsoft Graph Incremental Consent Flow Fix (v0.239.175)
+
+Fixed/Implemented in version: **0.239.175**
+
+### Issue Description
+
+Microsoft Graph plugin calls could incorrectly tell users to grant permissions again even when the
+authentication app registration already had the delegated permissions granted. The plugin auth flow
+also lost the plugin-requested scopes during the `/getAToken` callback and redeemed only the base
+login scope set.
+
+### Root Cause Analysis
+
+The shared plugin token helper treated any silent-token miss as a consent problem and always built
+an interactive URL with `prompt=consent`. At the same time, the OAuth callback always redeemed
+`SCOPE` from `config.py` instead of the scopes originally requested by the plugin operation.
+
+### Technical Details
+
+Files modified:
+- `application/single_app/functions_authentication.py`
+- `application/single_app/route_frontend_authentication.py`
+- `application/single_app/config.py`
+- `functional_tests/test_msgraph_auth_consent_flow.py`
+
+Code changes summary:
+- Added session-backed tracking for plugin-requested OAuth scopes.
+- Stopped forcing `prompt=consent` for generic interactive reauthentication.
+- Preserved explicit consent prompting only when Microsoft Entra returns a consent-specific error.
+- Updated the `/getAToken` callback to redeem the stored plugin scopes instead of always using the
+ base login scope list.
+
+Testing approach:
+- Added `functional_tests/test_msgraph_auth_consent_flow.py` to verify:
+ - silent token misses return interactive auth without forced consent,
+ - explicit consent errors still force `prompt=consent`,
+ - the OAuth callback redeems the originally requested plugin scopes.
+
+Impact analysis:
+- Users should no longer be told to re-consent for already granted delegated Graph permissions.
+- Incremental Graph plugin scopes now survive the OAuth round-trip correctly.
+- The authentication app registration remains the expected delegated-auth client for Graph plugin
+ operations, while managed identity continues to be unrelated to `/me` Graph calls.
+
+### Validation
+
+Before:
+- Silent token misses were surfaced as consent requests.
+- Interactive URLs always forced `prompt=consent`.
+- The callback redeemed only the base login scopes and could miss plugin-specific Graph scopes.
+
+After:
+- Generic interactive sign-in requests no longer force a consent prompt.
+- Consent is only forced when Microsoft Entra explicitly reports missing consent.
+- The callback redeems the exact plugin-requested scopes and clears the temporary scope state.
\ No newline at end of file
diff --git a/docs/explanation/fixes/MSGRAPH_TIMEZONE_LOOKUP_FIX.md b/docs/explanation/fixes/MSGRAPH_TIMEZONE_LOOKUP_FIX.md
new file mode 100644
index 00000000..76882a30
--- /dev/null
+++ b/docs/explanation/fixes/MSGRAPH_TIMEZONE_LOOKUP_FIX.md
@@ -0,0 +1,47 @@
+# Microsoft Graph Timezone Lookup Fix
+
+## Microsoft Graph Timezone Lookup Fix (v0.239.174)
+
+Fixed/Implemented in version: **0.239.174**
+
+### Issue Description
+
+Timezone-sensitive responses could be wrong because the core Semantic Kernel time plugin does not
+know the signed-in user's mailbox timezone and often defaults to UTC-oriented behavior.
+
+### Root Cause Analysis
+
+The application exposed Microsoft Graph calendar and mail operations but did not expose mailbox
+timezone settings. That left the agent without a user-specific timezone source when it needed to
+answer questions such as current local time, date boundaries, or time-relative interpretations.
+
+### Technical Details
+
+Files modified:
+- `application/single_app/semantic_kernel_plugins/msgraph_plugin.py`
+- `application/single_app/config.py`
+- `functional_tests/test_msgraph_plugin_operations.py`
+
+Code changes summary:
+- Added `get_my_timezone` to the Microsoft Graph plugin.
+- Wired the new operation to `GET /v1.0/me/mailboxSettings` with the `MailboxSettings.Read` scope.
+- Updated plugin metadata so agents can discover the timezone operation.
+- Extended functional coverage for the new timezone lookup.
+
+Testing approach:
+- Updated `functional_tests/test_msgraph_plugin_operations.py` to verify metadata exposure,
+ required scope usage, request path, and shaped timezone result payload.
+
+Impact analysis:
+- Agents now have an explicit per-user timezone source from Microsoft Graph.
+- Timezone-sensitive responses can prefer the user's mailbox timezone instead of assuming UTC.
+
+### Validation
+
+Before:
+- The agent had no dedicated Microsoft Graph timezone lookup.
+- TimePlugin-based answers could drift to UTC assumptions.
+
+After:
+- The agent can call `msgraph.get_my_timezone` to retrieve the mailbox timezone and formatting.
+- Time-sensitive answers can use mailbox timezone context before relying on generic time helpers.
\ No newline at end of file
diff --git a/docs/explanation/fixes/MULTIGPT_KEY_VAULT_SECRET_STORAGE_FIX.md b/docs/explanation/fixes/MULTIGPT_KEY_VAULT_SECRET_STORAGE_FIX.md
new file mode 100644
index 00000000..aac2c742
--- /dev/null
+++ b/docs/explanation/fixes/MULTIGPT_KEY_VAULT_SECRET_STORAGE_FIX.md
@@ -0,0 +1,118 @@
+# MultiGPT Key Vault Secret Storage Fix (v0.239.156)
+
+Fixed in version: **0.239.156**
+
+## Overview
+
+This fix adds Azure Key Vault support for MultiGPT model endpoint secrets across global, personal, and group endpoint configurations.
+
+When `enable_key_vault_secret_storage` is enabled, endpoint secrets entered for MultiGPT connections are now stored in Key Vault instead of remaining in persisted endpoint payloads. Backend fetch, test, Foundry listing, and runtime execution paths now resolve the stored secret server-side so the UI does not need to receive plaintext secrets after the initial save.
+
+Version `0.239.156` also fixes a follow-up regression in Foundry model discovery where sync fetch routes could import an async credential helper and fail with `'coroutine' object has no attribute 'token'` when requesting model lists.
+
+## Issue Description
+
+MultiGPT endpoint configuration supported secret-bearing auth fields such as:
+
+- `auth.api_key`
+- `auth.client_secret`
+
+Those fields were not integrated with the existing Key Vault helper flow that already supported agents and plugins.
+
+As a result:
+
+- endpoint secrets could remain outside the existing Key Vault lifecycle
+- admin and workspace editors depended on secrets being present in the browser to fetch models or test models
+- reopening a saved endpoint without the plaintext secret could break fetch and test operations
+- runtime consumers could fail if a saved endpoint only held a Key Vault reference and the execution path did not resolve it
+
+## Root Cause Analysis
+
+The original implementation had three separate gaps:
+
+1. MultiGPT endpoint auth fields did not have endpoint-specific Key Vault save/get/delete helpers.
+2. Fetch and test routes expected secrets in request payloads from the UI instead of merging with persisted endpoint configuration and resolving stored secrets on the server.
+3. Runtime endpoint consumers, especially Semantic Kernel multi-endpoint resolution and Foundry endpoint enrichment, used saved endpoint auth directly without a consistent Key Vault resolution step.
+
+The follow-up regression was caused by a fourth issue:
+
+4. Sync Foundry model-discovery code reused an async credential helper from the Foundry runtime layer, so synchronous token retrieval returned a coroutine instead of a token object.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/functions_keyvault.py`
+- `application/single_app/functions_settings.py`
+- `application/single_app/route_backend_models.py`
+- `application/single_app/route_frontend_admin_settings.py`
+- `application/single_app/semantic_kernel_loader.py`
+- `application/single_app/foundry_agent_runtime.py`
+- `application/single_app/static/js/admin/admin_model_endpoints.js`
+- `application/single_app/static/js/workspace/workspace_model_endpoints.js`
+- `application/single_app/config.py`
+- `functional_tests/test_foundry_model_fetch_sync_credentials.py`
+- `functional_tests/test_model_endpoints_key_vault_secret_storage.py`
+- `functional_tests/test_workspace_multi_endpoints.py`
+- `functional_tests/test_model_endpoint_normalization_backend.py`
+- `functional_tests/test_model_endpoints_api_key_manual_models.py`
+
+### Code Changes Summary
+
+- Added a dedicated `model-endpoint` Key Vault source and endpoint-specific helper functions for save, get, delete, and cleanup of obsolete references.
+- Added shared endpoint merge helpers so empty secret fields from the UI preserve stored secrets during edits.
+- Updated endpoint normalization to strip UI-only secret-presence flags before persistence.
+- Updated global, personal, and group endpoint save flows to store endpoint secrets in Key Vault when enabled.
+- Updated fetch/test request handling so saved endpoint auth is resolved server-side by endpoint ID and scope.
+- Updated Foundry agent listing and Semantic Kernel runtime endpoint selection to resolve Key Vault-backed endpoint auth before execution.
+- Split Foundry credential construction so runtime invocation keeps async credentials while sync model discovery and project SDK flows use sync Azure credentials.
+- Updated admin and workspace endpoint editors to use placeholder behavior for stored secrets and to include endpoint IDs in fetch/test requests.
+
+### Secret Handling Behavior
+
+- New or edited endpoint secrets are saved to Key Vault when Key Vault is enabled.
+- Existing saved endpoint secrets are not backfilled automatically.
+- Saved endpoint edits preserve stored secrets unless the auth type changes or a new secret is provided.
+- Obsolete endpoint Key Vault references are deleted when endpoint auth configuration changes or when an endpoint is removed.
+
+## Testing Approach
+
+### Functional Tests
+
+- `functional_tests/test_model_endpoints_key_vault_secret_storage.py`
+- `functional_tests/test_foundry_model_fetch_sync_credentials.py`
+- `functional_tests/test_workspace_multi_endpoints.py`
+- `functional_tests/test_model_endpoint_normalization_backend.py`
+- `functional_tests/test_model_endpoints_api_key_manual_models.py`
+
+### Validation Covered
+
+- Key Vault save, placeholder fetch, value fetch, and cleanup for endpoint auth secrets
+- Sync Foundry credential builders returning token objects instead of coroutine values
+- Frontend sanitization of endpoint secrets and secret-presence flags
+- Backend normalization of endpoint payloads without persisting UI-only flags
+- Admin and workspace request wiring for endpoint-ID-based stored-secret resolution
+
+## Impact Analysis
+
+### Before
+
+- MultiGPT endpoint secrets were not handled by the existing Key Vault helper pattern.
+- Saved endpoint tests could fail after reopening the editor because the browser no longer had the secret value.
+- Runtime endpoint consumers did not consistently resolve Key Vault-backed endpoint auth.
+
+### After
+
+- MultiGPT endpoint secrets follow the same secure storage pattern as other Key Vault-backed secret types.
+- Endpoint fetch/test flows work with stored secrets without rehydrating plaintext values into the UI.
+- Foundry model discovery works again for sync fetch routes because sync token acquisition no longer uses async credentials.
+- Runtime consumers resolve endpoint secrets server-side before model access.
+
+## Validation Results
+
+Validated with targeted functional test runs for:
+
+- endpoint Key Vault lifecycle
+- workspace endpoint sanitization
+- endpoint normalization
+- API key manual model entry and per-model test wiring
\ No newline at end of file
diff --git a/docs/explanation/fixes/NEW_CONVERSATION_AGENT_MODEL_REFRESH_FIX.md b/docs/explanation/fixes/NEW_CONVERSATION_AGENT_MODEL_REFRESH_FIX.md
new file mode 100644
index 00000000..d34c54b1
--- /dev/null
+++ b/docs/explanation/fixes/NEW_CONVERSATION_AGENT_MODEL_REFRESH_FIX.md
@@ -0,0 +1,42 @@
+# New Conversation Agent/Model Refresh Fix (0.236.066)
+
+## Fix Title
+Refresh agent and model lists on new conversation creation
+
+## Issue Description
+When starting a new conversation, the agent list did not refresh and depended on the previously selected conversation scope. The model list also did not re-apply user preferences after a new conversation was created.
+
+## Root Cause Analysis
+- New conversations are added without invoking the full selection flow, so the agent dropdown remained stale.
+- Group agents were fetched only when `window.activeGroupId` was set; active group settings in user settings (`activeGroupOid`) were not used.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.066**
+
+## Technical Details
+### Files Modified
+- application/single_app/static/js/agents_common.js
+- application/single_app/static/js/chat/chat-agents.js
+- application/single_app/static/js/chat/chat-retry.js
+- application/single_app/static/js/chat/chat-conversations.js
+- application/single_app/config.py
+- functional_tests/test_new_conversation_agent_model_refresh.py
+
+### Code Changes Summary
+- Group agent fetching now accepts an explicit active group id.
+- New conversation agent dropdown uses `activeGroupOid` from user settings.
+- Model selection is refreshed on new conversation creation and on conversation switches.
+
+## Testing Approach
+- Added a functional test to verify active group fallback and refresh hooks.
+
+## Validation
+### Test Results
+- Functional test: functional_tests/test_new_conversation_agent_model_refresh.py
+
+### User Experience Improvements
+- New conversations consistently show personal + active group agents.
+- Model selection is re-applied to reflect user preferences.
+
+## Related Updates
+- Config version updated to 0.236.066.
diff --git a/docs/explanation/fixes/NEW_FOUNDRY_API_VERSION_HANDLING_FIX.md b/docs/explanation/fixes/NEW_FOUNDRY_API_VERSION_HANDLING_FIX.md
new file mode 100644
index 00000000..31892a19
--- /dev/null
+++ b/docs/explanation/fixes/NEW_FOUNDRY_API_VERSION_HANDLING_FIX.md
@@ -0,0 +1,36 @@
+# NEW_FOUNDRY_API_VERSION_HANDLING_FIX.md
+
+# New Foundry API Version Handling Fix
+
+Fixed in version: **0.239.180**
+
+## Issue
+
+The New Foundry endpoint modal had already stopped defaulting the OpenAI API version to `2024-05-01-preview`, but existing New Foundry agents could still be loaded with that endpoint-level fallback value. This happened both in the edit modal and at runtime, even when the agent document in Cosmos already stored the correct `responses_api_version`.
+
+## Root Cause
+
+- The runtime loader merged endpoint connection settings over the saved New Foundry agent settings and treated the endpoint API version as authoritative.
+- The agent modal re-applied endpoint defaults after loading the saved agent, which overwrote the stored `responses_api_version` in the form.
+- New Foundry agents need endpoint metadata for discovery, but an explicitly saved agent `responses_api_version` must take precedence during edit and invocation.
+
+## Files Modified
+
+- `application/single_app/semantic_kernel_loader.py`
+- `application/single_app/static/js/agent_modal_stepper.js`
+- `application/single_app/static/js/admin/admin_model_endpoints.js`
+- `application/single_app/templates/_multiendpoint_modal.html`
+- `ui_tests/test_model_endpoint_request_uses_endpoint_id.py`
+- `functional_tests/test_new_foundry_endpoint_api_version_handling.py`
+- `application/single_app/config.py`
+
+## Validation
+
+- Functional coverage verifies New Foundry endpoints no longer receive the generic AOAI API-version default.
+- Functional coverage verifies the fetch response can populate the Responses API version back into the agent modal when available.
+- Functional coverage verifies existing New Foundry agents preserve their saved `responses_api_version` when the endpoint configuration contains a different fallback value.
+- UI coverage now expects the endpoint modal to clear the OpenAI API version field when `new_foundry` is selected for a new endpoint.
+
+## Impact
+
+Newly created New Foundry endpoint configurations no longer silently inherit `2024-05-01-preview`, and existing New Foundry agents now keep their saved Responses/OpenAI API version during both modal editing and runtime invocation. Endpoint configuration remains the default source for incomplete agents, but it no longer clobbers an agent-specific version that was already persisted correctly.
\ No newline at end of file
diff --git a/docs/explanation/fixes/NEW_FOUNDRY_UI_VISIBILITY_FIX.md b/docs/explanation/fixes/NEW_FOUNDRY_UI_VISIBILITY_FIX.md
new file mode 100644
index 00000000..2056127f
--- /dev/null
+++ b/docs/explanation/fixes/NEW_FOUNDRY_UI_VISIBILITY_FIX.md
@@ -0,0 +1,39 @@
+# NEW_FOUNDRY_UI_VISIBILITY_FIX.md
+
+# New Foundry UI Visibility Fix
+
+Fixed in version: **0.239.177**
+
+## Issue
+
+New Foundry had already been wired into backend fetch and streaming paths, but the browser UI no longer exposed it in the agent modal or the model endpoint modal. Frontend endpoint sanitization also filtered `new_foundry` out of the visible provider list, which prevented saved New Foundry endpoints from appearing in user-facing workflows.
+
+## Root Cause
+
+- The New Foundry agent type radio in `_agent_modal.html` was wrapped in a disabled `{% if false %}` block.
+- `_multiendpoint_modal.html` only exposed `aoai` and classic Foundry in the provider selector.
+- `is_frontend_visible_model_endpoint_provider()` in `functions_settings.py` still treated `new_foundry` as unsupported for frontend use.
+
+## Files Modified
+
+- `application/single_app/templates/_agent_modal.html`
+- `application/single_app/templates/_multiendpoint_modal.html`
+- `application/single_app/functions_settings.py`
+- `functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py`
+- `functional_tests/test_new_foundry_fetch_support.py`
+- `functional_tests/test_new_foundry_ui_visibility.py`
+- `ui_tests/test_agent_modal_dual_foundry_modes.py`
+- `ui_tests/test_model_endpoint_request_uses_endpoint_id.py`
+- `application/single_app/config.py`
+
+## Validation
+
+- Functional coverage now verifies the New Foundry agent type is present in the agent modal template.
+- Functional coverage now verifies the endpoint modal exposes `new_foundry` and frontend endpoint sanitization allows it.
+- Existing UI tests were updated to expect New Foundry to be visible in both agent and endpoint modal workflows.
+
+## Impact
+
+Users can configure New Foundry endpoints again and select the New Foundry agent type in the browser, which restores the UI path needed to test the REST-based streaming backend.
+
+The agent modal now also displays fetched published versions in the application selector and inherits the Responses API version from the selected endpoint configuration instead of asking the user to type version metadata manually.
\ No newline at end of file
diff --git a/docs/explanation/fixes/NEW_FOUNDRY_VERSION_METADATA_FIX.md b/docs/explanation/fixes/NEW_FOUNDRY_VERSION_METADATA_FIX.md
new file mode 100644
index 00000000..a7a4b150
--- /dev/null
+++ b/docs/explanation/fixes/NEW_FOUNDRY_VERSION_METADATA_FIX.md
@@ -0,0 +1,35 @@
+# NEW_FOUNDRY_VERSION_METADATA_FIX.md
+
+# New Foundry Version Metadata Fix
+
+Fixed in version: **0.239.177**
+
+## Issue
+
+New Foundry application fetches were succeeding, but the returned list was not always surfacing the published version number in the agent selector. That forced the user to infer version metadata manually. The agent modal also exposed a manual version field even though the live Responses invocation path uses the application name and endpoint-level OpenAI API version instead.
+
+## Root Cause
+
+- The REST normalization logic did not read nested version shapes such as `versions.latest.version`.
+- The agent modal treated application version as a user-entered value instead of fetched metadata.
+- The agent modal still used a hardcoded default for the Responses API version instead of inheriting it from the selected endpoint configuration.
+
+## Files Modified
+
+- `application/single_app/foundry_agent_runtime.py`
+- `application/single_app/static/js/agent_modal_stepper.js`
+- `application/single_app/templates/_agent_modal.html`
+- `application/single_app/templates/_multiendpoint_modal.html`
+- `functional_tests/test_new_foundry_fetch_support.py`
+- `functional_tests/test_new_foundry_version_metadata.py`
+- `application/single_app/config.py`
+
+## Validation
+
+- Functional coverage verifies nested New Foundry version metadata is read from the fetched payload.
+- Functional coverage verifies the version field is no longer presented as a manual entry field in the agent modal.
+- Functional coverage verifies the current app version and selector-version formatting are present.
+
+## Impact
+
+Users can fetch New Foundry applications and see the published version in the selector while continuing to configure the runtime by application name. The Responses API version is now inherited from endpoint configuration rather than hardcoded in the agent modal.
\ No newline at end of file
diff --git a/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_SCOPE_FIX.md b/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_SCOPE_FIX.md
new file mode 100644
index 00000000..51bfd578
--- /dev/null
+++ b/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_SCOPE_FIX.md
@@ -0,0 +1,32 @@
+# Personal Agent Dropdown Scope Fix (0.236.062)
+
+## Issue Description
+Personal agents were hidden in the chat dropdown whenever an active group ID existed in global page state, even when the user was chatting in the personal tab.
+
+## Root Cause Analysis
+The dropdown treated any non-empty `activeGroupId` as a group context. Because the chat page always renders `active_group_id`, personal agents were filtered out even in user chat.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.062**
+
+## Technical Details
+- **Files modified**:
+ - application/single_app/static/js/chat/chat-agents.js
+ - application/single_app/static/js/chat/chat-retry.js
+ - application/single_app/config.py
+- **Change summary**:
+ - Determine group scope using `window.activeChatTabType === 'group'` before using `activeGroupId`.
+ - Personal agents now show in the user chat tab; group agents only show in group chat.
+
+## Testing Approach
+- Added functional test: functional_tests/test_personal_agent_dropdown_scope_fix.py
+- Test verifies the group chat scope guard is present in chat dropdown scripts.
+
+## Impact Analysis
+- Restores personal agent visibility in personal chat.
+- Preserves group agent visibility in group chat.
+- No changes to backend agent data.
+
+## Validation
+- Open personal chat and confirm personal + global agents appear.
+- Switch to group chat tab and confirm group + global agents appear.
diff --git a/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_VISIBILITY_FIX.md b/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_VISIBILITY_FIX.md
new file mode 100644
index 00000000..a4522949
--- /dev/null
+++ b/docs/explanation/fixes/PERSONAL_AGENT_DROPDOWN_VISIBILITY_FIX.md
@@ -0,0 +1,32 @@
+# Personal Agent Dropdown Visibility Fix (0.236.061)
+
+## Issue Description
+Personal agents were missing from the chat agent dropdown when no active group was actually selected. The UI treated string values like "None" as a valid active group ID, so the dropdown only showed group and global agents.
+
+## Root Cause Analysis
+The chat dropdown logic relied on `window.activeGroupId` and treated any non-empty string as a real group ID. When templates rendered `None` as a string, the code mistakenly filtered out personal agents.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.061**
+
+## Technical Details
+- **Files modified**:
+ - application/single_app/static/js/chat/chat-agents.js
+ - application/single_app/static/js/chat/chat-retry.js
+ - application/single_app/config.py
+- **Change summary**:
+ - Normalize `activeGroupId` by treating "none", "null", and "undefined" as empty.
+ - Ensures personal agents are included when no real group is active.
+
+## Testing Approach
+- Added functional test: functional_tests/test_personal_agent_dropdown_in_chats_fix.py
+- Test validates the normalization guard exists in both chat dropdown scripts.
+
+## Impact Analysis
+- Personal agents now appear in the chat dropdown when no active group is selected.
+- Group/global behavior remains unchanged when a real active group ID is present.
+
+## Validation
+- Open chat page with no active group.
+- Confirm personal + global agents appear in the dropdown.
+- Switch to a valid active group and confirm group + global agents appear.
diff --git a/docs/explanation/fixes/PERSONAL_AGENT_USER_ID_SAVE_FIX.md b/docs/explanation/fixes/PERSONAL_AGENT_USER_ID_SAVE_FIX.md
new file mode 100644
index 00000000..32612f32
--- /dev/null
+++ b/docs/explanation/fixes/PERSONAL_AGENT_USER_ID_SAVE_FIX.md
@@ -0,0 +1,32 @@
+# Personal Agent User ID Save Fix (v0.236.050)
+
+## Issue Description
+Personal agents were saved without the `user_id` property, causing them to be missing from user-scoped queries.
+
+## Root Cause Analysis
+The save flow populated `user_id` on a sanitized copy, but persisted the unsanitized payload without `user_id`.
+
+## Version Implemented
+Fixed/Implemented in version: **0.236.050**
+
+## Technical Details
+### Files Modified
+- application/single_app/functions_personal_agents.py
+- application/single_app/config.py
+- functional_tests/test_personal_agent_user_id_saved.py
+
+### Code Changes Summary
+- Assigned `user_id` and metadata to the payload persisted to Cosmos.
+- Incremented the application version.
+
+### Testing Approach
+- Functional test validates `user_id` assignment in the save flow.
+
+## Impact Analysis
+- Personal agents now show up in user-scoped lists and queries.
+
+## Validation
+- Functional test: functional_tests/test_personal_agent_user_id_saved.py
+
+## Reference to Config Version Update
+- Version updated in application/single_app/config.py to **0.236.050**.
diff --git a/docs/explanation/fixes/STREAMING_THOUGHT_PROGRESSION_FIX.md b/docs/explanation/fixes/STREAMING_THOUGHT_PROGRESSION_FIX.md
new file mode 100644
index 00000000..c78ca3bb
--- /dev/null
+++ b/docs/explanation/fixes/STREAMING_THOUGHT_PROGRESSION_FIX.md
@@ -0,0 +1,48 @@
+# Streaming Thought Progression Fix
+
+Fixed/Implemented in version: **0.239.185**
+
+## Overview
+
+This fix restores live thought progression in the chat streaming placeholder without reintroducing the previous-message bleed-through regression.
+
+## Issue Description
+
+After the earlier stale-thought isolation work, the active streaming placeholder could stop advancing through new thought updates. Users would often see one early thought remain visible until the assistant began streaming content, even though later thought events had been recorded for the same reply.
+
+## Root Cause
+
+- The live placeholder relied on message correlation state that was not fully reset per placeholder session.
+- Pending-thought polling still grouped by the most recent message in the conversation window instead of allowing the caller to request thoughts for a specific assistant message.
+- The browser had no dedicated per-placeholder state for deduping and ordering thought updates while still blocking stale events from older messages.
+
+## Files Modified
+
+- `application/single_app/static/js/chat/chat-thoughts.js`
+- `application/single_app/static/js/chat/chat-streaming.js`
+- `application/single_app/functions_thoughts.py`
+- `application/single_app/route_backend_thoughts.py`
+- `functional_tests/test_streaming_thought_finalization.py`
+- `functional_tests/test_pending_thought_message_scoping.py`
+- `ui_tests/test_streaming_thought_progression.py`
+- `application/single_app/config.py`
+
+## Code Changes Summary
+
+- Added explicit per-placeholder reset and dedupe state for live streaming thoughts.
+- Preserved the active assistant message guard so stale thoughts from a prior reply are still ignored.
+- Added an optional `message_id` query parameter to the pending-thought API and backend helper.
+- Returned `message_id` in sanitized thought payloads so callers can verify correlation.
+- Restored live SSE emission for agent/plugin invocation thoughts instead of replaying those updates only after the stream completed.
+- Added regression coverage for browser placeholder updates and message-scoped pending thought queries.
+
+## Testing Approach
+
+- Functional test coverage validates the message-scoped pending-thought backend contract and the updated live-thought frontend hooks.
+- UI test coverage validates that the placeholder advances to the newest thought, does not retain the previous message's thought, and ignores new thought updates once response content starts streaming.
+
+## Impact Analysis
+
+- Streaming replies now keep the latest current-message thought visible until the assistant starts sending response text.
+- Reconnect and fallback callers can request pending thoughts for one assistant message instead of reading whichever message updated most recently.
+- The stale-thought isolation behavior remains in place for consecutive replies.
\ No newline at end of file
diff --git a/docs/explanation/fixes/STREAMING_THOUGHT_STALE_STATUS_FIX.md b/docs/explanation/fixes/STREAMING_THOUGHT_STALE_STATUS_FIX.md
new file mode 100644
index 00000000..7a758ce2
--- /dev/null
+++ b/docs/explanation/fixes/STREAMING_THOUGHT_STALE_STATUS_FIX.md
@@ -0,0 +1,43 @@
+# Streaming Thought Stale Status Fix
+
+## Fix Title
+Streaming thought placeholders now stay scoped to the active assistant response instead of briefly showing the previous response's final thought.
+
+## Issue Description
+In back-to-back streaming chats, the next assistant placeholder could briefly display the final status from the prior response, such as a trailing model completion thought. Users would see the wrong thought badge while the new Semantic Kernel execution was just starting.
+
+## Root Cause Analysis
+- The streaming UI started conversation-level pending-thought polling even though the streaming response already delivered live thought events over SSE.
+- That polling endpoint returns the latest recent thoughts for the conversation, so during the startup gap before the new response wrote its first thought, the client could receive the previous message's final thought.
+- The streaming thought renderer updated a temporary assistant placeholder using a broad temp-message lookup instead of an explicitly tracked active streaming target.
+
+## Version Implemented
+Fixed in version: **0.239.181**
+
+## Files Modified
+| File | Change |
+|------|--------|
+| `application/single_app/static/js/chat/chat-streaming.js` | Removed streaming-mode reliance on pending-thought polling and bound live thought updates to the active temporary assistant placeholder |
+| `application/single_app/static/js/chat/chat-thoughts.js` | Added per-stream thought session tracking and exact placeholder targeting for streaming thought renders |
+| `application/single_app/route_backend_chats.py` | Added `message_id` to streaming thought SSE payloads so live thought events carry stable assistant-message identity |
+| `functional_tests/test_streaming_thought_finalization.py` | Added regression coverage for message-scoped streaming thought updates |
+| `application/single_app/config.py` | Version bump to 0.239.181 |
+
+## Code Changes Summary
+- Started an explicit streaming-thought session when a temporary assistant placeholder is created.
+- Cleared that session on completion, error, or interrupted stream paths.
+- Routed SSE thought events to the current placeholder by exact temporary message ID instead of searching for any `temp_ai_` node.
+- Tracked backend `message_id` values for live thought events and ignored mismatched thought payloads.
+- Stopped starting the conversation-level pending-thought polling flow for streaming chat responses.
+
+## Testing Approach
+- Extended `functional_tests/test_streaming_thought_finalization.py` with assertions covering message-scoped thought rendering and SSE `message_id` payloads.
+
+## Impact Analysis
+- Streaming placeholders should no longer inherit the previous response's terminal thought during quick consecutive prompts.
+- The streaming path now relies on its native SSE thought feed instead of mixing conversation-level polling into the same UI state.
+- The existing content-start guards remain in place, so live thoughts still cannot replace real streamed answer content once content begins.
+
+## Validation
+- Before: a new streaming response could begin with the prior response's final thought badge.
+- After: a new streaming response stays on its neutral placeholder until its own live thoughts arrive, and those thoughts only render on the active placeholder.
\ No newline at end of file
diff --git a/docs/explanation/release_notes.md b/docs/explanation/release_notes.md
index 09224196..51d965ee 100644
--- a/docs/explanation/release_notes.md
+++ b/docs/explanation/release_notes.md
@@ -2,6 +2,36 @@
# Feature Release
+### **(v0.239.158)**
+
+#### Bug Fixes
+
+* **Workspace Agent View Consistency**
+ * Fixed personal and group workspace agent lists so table-view actions now use the same button order, making agent management behavior more predictable across both workspaces.
+ * Fixed group workspace agent grid cards so editable group agents once again show Edit and Delete actions when the current user has permission to manage them.
+ * Fixed personal workspace agent table layout so action buttons stay inside the table instead of overflowing past the Actions column.
+ * (Ref: `workspace.html`, `workspace_agents.js`, `group_agents.js`, `view-utils.js`, `test_workspace_agent_views_consistency.py`)
+
+### **(v0.239.156)**
+
+#### Bug Fixes
+
+* **MultiGPT Endpoint Key Vault Secret Storage and Foundry Fetch Reliability**
+ * MultiGPT endpoint secrets such as API keys and service principal client secrets now move into Azure Key Vault when Key Vault secret storage is enabled, instead of remaining in saved endpoint payloads.
+ * Endpoint fetch, test, Foundry listing, and runtime execution now resolve stored secrets server-side by endpoint ID, so reopening an endpoint no longer depends on the browser still holding plaintext credentials.
+ * Fixed a follow-up regression in Foundry model discovery where sync fetch routes could fail with `'coroutine' object has no attribute 'token'` because async credentials were being reused in a synchronous token acquisition path.
+ * (Ref: `functions_keyvault.py`, `functions_settings.py`, `route_backend_models.py`, `route_frontend_admin_settings.py`, `semantic_kernel_loader.py`, `foundry_agent_runtime.py`, `admin_model_endpoints.js`, `workspace_model_endpoints.js`, `test_model_endpoints_key_vault_secret_storage.py`, `test_foundry_model_fetch_sync_credentials.py`)
+
+### **(v0.239.153)**
+
+#### Bug Fixes
+
+* **Live Tool Invocation Thoughts During Streaming**
+ * Updated plugin thought handling so the chat can surface an immediate `Invoking Plugin.Function` thought as soon as a tool starts, instead of waiting until the tool completes.
+ * Streaming chat now polls pending thoughts while the response is still in flight, allowing the active status badge to switch from model-sending text to the currently executing plugin call during long-running tools such as `WaitPlugin.wait`.
+ * Completed plugin thoughts still include the richer human-readable summaries for wait, math, and generic plugin executions, and broader plugin coverage remains enabled through auto-wrapping for manifest-loaded plugins.
+ * (Ref: `plugin_invocation_logger.py`, `plugin_invocation_thoughts.py`, `chat-thoughts.js`, `chat-streaming.js`, `logged_plugin_loader.py`, `test_logged_core_plugins.py`)
+
### **(v0.240.001)**
#### Bug Fixes
diff --git a/functional_tests/test_admin_action_activity_log.py b/functional_tests/test_admin_action_activity_log.py
new file mode 100644
index 00000000..5530f4c1
--- /dev/null
+++ b/functional_tests/test_admin_action_activity_log.py
@@ -0,0 +1,34 @@
+# test_admin_action_activity_log.py
+#!/usr/bin/env python3
+"""
+Functional test for general admin action logging.
+Version: 0.236.017
+Implemented in: 0.236.017
+
+This test ensures a helper exists for logging general admin actions with
+admin identity fields and a description for activity log display.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_admin_action_activity_log_helper():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ log_path = os.path.join(repo_root, 'application', 'single_app', 'functions_activity_logging.py')
+ content = read_file_text(log_path)
+
+ assert 'def log_general_admin_action' in content, "Missing admin action logging helper."
+ assert "activity_type': 'admin_action'" in content, "Expected admin_action activity type."
+ assert "'admin':" in content, "Expected admin identity metadata in activity record."
+ assert "'description':" in content, "Expected description for activity display."
+
+ print("✅ Admin action activity logging helper verified.")
+
+
+if __name__ == "__main__":
+ test_admin_action_activity_log_helper()
diff --git a/functional_tests/test_agent_dropdown_scope_by_conversation_metadata.py b/functional_tests/test_agent_dropdown_scope_by_conversation_metadata.py
new file mode 100644
index 00000000..7ad3e187
--- /dev/null
+++ b/functional_tests/test_agent_dropdown_scope_by_conversation_metadata.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""
+Functional test for agent dropdown scope by conversation metadata.
+Version: 0.236.063
+Implemented in: 0.236.063
+
+This test ensures the chat agent dropdown derives scope from the active
+conversation's data-chat-type and shows all agents for new conversations.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_file(path):
+ with open(path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def _assert_metadata_scope_guard(js_text, file_label):
+ required_snippets = [
+ "data-chat-type",
+ "conversationScope",
+ "orderedAgents",
+ ]
+ missing = [snippet for snippet in required_snippets if snippet not in js_text]
+ if missing:
+ raise AssertionError(
+ f"Missing conversation metadata scope guard in {file_label}: {', '.join(missing)}"
+ )
+
+
+def test_agent_dropdown_scope_guard():
+ """Verify dropdown scope logic uses conversation metadata."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ chat_agents_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-agents.js",
+ )
+ chat_retry_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-retry.js",
+ )
+
+ chat_agents_js = _read_file(chat_agents_path)
+ chat_retry_js = _read_file(chat_retry_path)
+
+ _assert_metadata_scope_guard(chat_agents_js, "chat-agents.js")
+ _assert_metadata_scope_guard(chat_retry_js, "chat-retry.js")
+
+ print("✅ Agent dropdown scope uses conversation metadata")
+ return True
+
+
+if __name__ == "__main__":
+ success = test_agent_dropdown_scope_guard()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_agent_gpt_init_skips_multiendpoint.py b/functional_tests/test_agent_gpt_init_skips_multiendpoint.py
new file mode 100644
index 00000000..57acc652
--- /dev/null
+++ b/functional_tests/test_agent_gpt_init_skips_multiendpoint.py
@@ -0,0 +1,58 @@
+# test_agent_gpt_init_skips_multiendpoint.py
+#!/usr/bin/env python3
+"""
+Functional test for agent GPT init gating.
+Version: 0.236.052
+Implemented in: 0.236.052
+
+This test ensures agent requests skip multi-endpoint GPT resolution and
+default APIM deployment selection when model_deployment is not provided.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_agent_gpt_init_gating():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ chat_path = os.path.join(repo_root, "application", "single_app", "route_backend_chats.py")
+
+ chat_content = read_file_text(chat_path)
+
+ assert "Skipping multi-endpoint resolution because agent_info is provided" in chat_content, (
+ "Expected agent requests to skip multi-endpoint GPT resolution."
+ )
+ assert "Agent request without model_deployment; defaulting to first APIM deployment" in chat_content, (
+ "Expected APIM defaulting behavior for agent requests without model_deployment."
+ )
+
+ print("✅ Agent GPT init gating verified.")
+
+
+def run_tests():
+ tests = [test_agent_gpt_init_gating]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_agent_modal_model_endpoint_filtering.py b/functional_tests/test_agent_modal_model_endpoint_filtering.py
new file mode 100644
index 00000000..1083e77e
--- /dev/null
+++ b/functional_tests/test_agent_modal_model_endpoint_filtering.py
@@ -0,0 +1,64 @@
+# test_agent_modal_model_endpoint_filtering.py
+"""
+Functional test for agent modal model endpoint filtering.
+Version: 0.236.056
+Implemented in: 0.236.056
+
+This test ensures the agent modal dropdown includes non-AOAI providers for local
+agents and normalizes model IDs/display names when building the model list.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def test_agent_modal_model_endpoint_filtering():
+ """Validate agent modal dropdown logic for model endpoints."""
+ print("🔍 Validating agent modal model endpoint filtering...")
+
+ try:
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ js_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "agents_common.js",
+ )
+
+ if not os.path.exists(js_path):
+ raise FileNotFoundError(f"agents_common.js not found at {js_path}")
+
+ with open(js_path, "r", encoding="utf-8") as handle:
+ content = handle.read()
+
+ required_snippets = [
+ "agentType === 'aifoundry' && provider !== 'aifoundry'",
+ "const modelId = model.id",
+ "const displayName = model.displayName",
+ "display_name: displayName",
+ ]
+
+ missing = [snippet for snippet in required_snippets if snippet not in content]
+ if missing:
+ raise AssertionError(
+ f"Missing expected filtering/normalization snippets: {', '.join(missing)}"
+ )
+
+ print("✅ Agent modal model endpoint filtering logic present.")
+ return True
+
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ return False
+
+
+if __name__ == "__main__":
+ success = test_agent_modal_model_endpoint_filtering()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_agent_modal_multiendpoint_foundry_advanced_notice.py b/functional_tests/test_agent_modal_multiendpoint_foundry_advanced_notice.py
new file mode 100644
index 00000000..e6a30cc1
--- /dev/null
+++ b/functional_tests/test_agent_modal_multiendpoint_foundry_advanced_notice.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# test_agent_modal_multiendpoint_foundry_advanced_notice.py
+"""
+Functional test for agent modal multi-endpoint and Foundry advanced notice.
+Version: 0.236.054
+Implemented in: 0.236.054
+
+This test ensures the agent modal hides custom connection fields when
+multi-endpoint model management is enabled and shows an advanced
+settings notice for Azure AI Foundry agents.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_agent_modal_multiendpoint_and_foundry_notice():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ modal_path = os.path.join(repo_root, "application", "single_app", "templates", "_agent_modal.html")
+ stepper_path = os.path.join(repo_root, "application", "single_app", "static", "js", "agent_modal_stepper.js")
+
+ modal_content = read_file_text(modal_path)
+ stepper_content = read_file_text(stepper_path)
+
+ assert "agent-custom-connection-toggle" in modal_content, "Expected custom connection toggle markup."
+ assert "settings.enable_multi_model_endpoints" in modal_content, (
+ "Expected Jinja gating for custom connection fields when multi-endpoint is enabled."
+ )
+ assert "agent-advanced-foundry-note" in modal_content, (
+ "Expected Foundry advanced settings notice in the modal."
+ )
+ assert "agent-advanced-foundry-note" in stepper_content, (
+ "Expected JS toggling for Foundry advanced settings notice."
+ )
+
+ print("✅ Agent modal multi-endpoint and Foundry advanced notice verified.")
+
+
+def run_tests():
+ tests = [test_agent_modal_multiendpoint_and_foundry_notice]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_agent_schema_full_validation.py b/functional_tests/test_agent_schema_full_validation.py
new file mode 100644
index 00000000..df1466a3
--- /dev/null
+++ b/functional_tests/test_agent_schema_full_validation.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+# test_agent_schema_full_validation.py
+"""
+Functional test for full agent schema validation.
+Version: 0.236.049
+Implemented in: 0.236.049
+
+This test ensures that validate_agent uses the full Draft 7 schema
+so internal definitions resolve correctly for Foundry settings.
+"""
+
+import copy
+import os
+import sys
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'application', 'single_app'))
+
+from json_schema_validation import validate_agent
+
+
+def build_valid_foundry_agent():
+ """Build a valid Azure AI Foundry agent payload."""
+ return {
+ "id": "123e4567-e89b-12d3-a456-426614174000",
+ "user_id": "test-user-123",
+ "last_updated": "2025-01-30T00:00:00Z",
+ "name": "foundry_agent_1",
+ "display_name": "Foundry Agent",
+ "description": "Valid Foundry agent payload for schema validation.",
+ "azure_openai_gpt_endpoint": "https://example.openai.azure.com",
+ "azure_openai_gpt_deployment": "gpt-4o",
+ "azure_openai_gpt_api_version": "2024-10-01-preview",
+ "agent_type": "aifoundry",
+ "instructions": "You are a helpful test agent.",
+ "actions_to_load": [],
+ "other_settings": {
+ "azure_ai_foundry": {
+ "agent_id": "agent-123"
+ }
+ },
+ "max_completion_tokens": 2048,
+ "is_global": False,
+ "is_group": False
+ }
+
+
+def test_valid_agent_schema():
+ """Ensure valid Foundry agent passes schema validation."""
+ print("\n🔍 Testing valid Foundry agent schema validation...")
+ try:
+ agent = build_valid_foundry_agent()
+ result = validate_agent(agent)
+ if result is None:
+ print("✅ Valid Foundry agent passed schema validation.")
+ return True
+
+ print(f"❌ Validation failed unexpectedly: {result}")
+ return False
+ except Exception as exc:
+ print(f"❌ Unexpected exception during validation: {exc}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+def test_invalid_agent_missing_foundry_agent_id():
+ """Ensure missing Foundry agent_id triggers validation error."""
+ print("\n🔍 Testing Foundry agent schema validation with missing agent_id...")
+ try:
+ agent = build_valid_foundry_agent()
+ agent_missing_id = copy.deepcopy(agent)
+ agent_missing_id["other_settings"]["azure_ai_foundry"].pop("agent_id", None)
+
+ result = validate_agent(agent_missing_id)
+ if result:
+ print("✅ Missing agent_id correctly failed schema validation.")
+ return True
+
+ print("❌ Validation unexpectedly succeeded for missing agent_id.")
+ return False
+ except Exception as exc:
+ print(f"❌ Unexpected exception during validation: {exc}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+if __name__ == "__main__":
+ tests = [
+ test_valid_agent_schema,
+ test_invalid_agent_missing_foundry_agent_id
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ results.append(test())
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_agent_schema_ref_resolution.py b/functional_tests/test_agent_schema_ref_resolution.py
new file mode 100644
index 00000000..e176050e
--- /dev/null
+++ b/functional_tests/test_agent_schema_ref_resolution.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# test_agent_schema_ref_resolution.py
+"""
+Functional test for agent schema reference resolution.
+Version: 0.236.049
+Implemented in: 0.236.049
+
+This test ensures agent schema validation uses the root schema so $ref
+entries like OtherSettings resolve correctly.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_agent_schema_ref_resolution():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ validation_path = os.path.join(repo_root, "application", "single_app", "json_schema_validation.py")
+ content = read_file_text(validation_path)
+
+ assert "Draft7Validator(schema" in content, "Expected agent schema validation to use the root schema."
+ assert "RefResolver.from_schema(schema)" in content, "Expected schema ref resolver wiring."
+
+ print("✅ Agent schema ref resolution wiring verified.")
+
+
+def run_tests():
+ tests = [test_agent_schema_ref_resolution]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_agent_selection_recovery.py b/functional_tests/test_agent_selection_recovery.py
new file mode 100644
index 00000000..56d7e529
--- /dev/null
+++ b/functional_tests/test_agent_selection_recovery.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+# test_agent_selection_recovery.py
+"""
+Functional test for per-user agent loading and selection safeguards.
+Version: 0.239.173
+Implemented in: 0.239.173
+
+This test ensures per-user agent loading preserves personal agents during
+global merge and invalid persisted agent selections are rejected when saved.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_agent_selection_recovery_wiring():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ loader_path = os.path.join(
+ repo_root, "application", "single_app", "semantic_kernel_loader.py"
+ )
+ agents_route_path = os.path.join(
+ repo_root, "application", "single_app", "route_backend_agents.py"
+ )
+ chats_route_path = os.path.join(
+ repo_root, "application", "single_app", "route_backend_chats.py"
+ )
+
+ loader_text = read_file_text(loader_path)
+ agents_route_text = read_file_text(agents_route_path)
+ chats_route_text = read_file_text(chats_route_path)
+
+ assert "agents_cfg = get_personal_agents(user_id)" in loader_text, (
+ "Expected semantic kernel loader to initialize candidate agents from personal agents."
+ )
+ assert "agents_cfg = []" not in loader_text.split("agents_cfg = get_personal_agents(user_id)", 1)[1][:2500], (
+ "Unexpected reset of agents_cfg after personal agents are loaded."
+ )
+ assert "Selected agent is not available for this user or scope." in agents_route_text, (
+ "Expected selected-agent endpoint to reject invalid selections."
+ )
+ assert "def _find_matching_user_selected_agent" in agents_route_text, (
+ "Expected user selected-agent matching helper in route_backend_agents.py."
+ )
+ assert "if isinstance(selected_agent_info, dict):" in chats_route_text, (
+ "Expected chat route to normalize dict-based selected_agent settings."
+ )
+
+ print("✅ Agent loading and selection wiring verified.")
+
+
+def run_tests():
+ tests = [test_agent_selection_recovery_wiring]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
\ No newline at end of file
diff --git a/functional_tests/test_ai_models_tab_embedding_image_location.py b/functional_tests/test_ai_models_tab_embedding_image_location.py
new file mode 100644
index 00000000..21f60c9e
--- /dev/null
+++ b/functional_tests/test_ai_models_tab_embedding_image_location.py
@@ -0,0 +1,46 @@
+# test_ai_models_tab_embedding_image_location.py
+#!/usr/bin/env python3
+"""
+Functional test for AI Models tab placement of embeddings and image generation sections.
+Version: 0.236.014
+Implemented in: 0.236.014
+
+This test ensures embeddings and image generation settings remain on the AI Models tab
+and are not nested inside the legacy model modal.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_ai_models_tab_embedding_image_location():
+ """Verify embeddings and image generation sections are outside legacy modal markup."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ template_path = os.path.join(repo_root, 'application', 'single_app', 'templates', 'admin_settings.html')
+
+ content = read_file_text(template_path)
+
+ embeddings_marker = 'id="embeddings-configuration"'
+ image_marker = 'id="image-generation-configuration"'
+ legacy_modal_marker = 'id="legacyModelSettingsModal"'
+
+ assert embeddings_marker in content, "Embeddings configuration section is missing."
+ assert image_marker in content, "Image generation configuration section is missing."
+ assert legacy_modal_marker in content, "Legacy model modal is missing."
+
+ embeddings_index = content.index(embeddings_marker)
+ image_index = content.index(image_marker)
+ legacy_modal_index = content.index(legacy_modal_marker)
+
+ assert embeddings_index < legacy_modal_index, "Embeddings section should be outside legacy modal."
+ assert image_index < legacy_modal_index, "Image generation section should be outside legacy modal."
+
+ print("✅ Embeddings and image generation sections are on the AI Models tab.")
+
+
+if __name__ == "__main__":
+ test_ai_models_tab_embedding_image_location()
diff --git a/functional_tests/test_approval_notification_routing_fix.py b/functional_tests/test_approval_notification_routing_fix.py
new file mode 100644
index 00000000..f198ceda
--- /dev/null
+++ b/functional_tests/test_approval_notification_routing_fix.py
@@ -0,0 +1,539 @@
+#!/usr/bin/env python3
+# test_approval_notification_routing_fix.py
+"""
+Functional test for approval notification routing, cleanup, and template activity logging.
+Version: 0.239.162
+Implemented in: 0.239.159
+
+This test ensures that approval requests notify submitters and reviewers,
+that reviewer pending notifications are cleared when a decision is made,
+and that agent template review outcomes notify the original submitter.
+"""
+
+import copy
+import os
+import sys
+
+from azure.cosmos import exceptions
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'application', 'single_app'))
+
+
+class FakeNotificationContainer:
+ """In-memory Cosmos-like container for notification tests."""
+
+ def __init__(self):
+ self.items = {}
+
+ def create_item(self, item):
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+ def upsert_item(self, item):
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+ def delete_item(self, item=None, partition_key=None):
+ item_id = item if item is not None else partition_key
+ if item_id not in self.items:
+ raise exceptions.CosmosResourceNotFoundError(message='Notification not found')
+ del self.items[item_id]
+
+ def query_items(self, query=None, parameters=None, partition_key=None, enable_cross_partition_query=False):
+ results = [copy.deepcopy(item) for item in self.items.values()]
+ parameter_map = {param['name']: param['value'] for param in (parameters or [])}
+
+ if "c.scope = 'assignment'" in (query or ''):
+ results = [item for item in results if item.get('scope') == 'assignment']
+
+ if '@notification_id' in parameter_map:
+ results = [item for item in results if item.get('id') == parameter_map['@notification_id']]
+
+ if '@user_id' in parameter_map:
+ results = [item for item in results if item.get('user_id') == parameter_map['@user_id']]
+
+ if '@group_id' in parameter_map:
+ results = [item for item in results if item.get('group_id') == parameter_map['@group_id']]
+
+ notification_types = [
+ value
+ for name, value in parameter_map.items()
+ if name.startswith('@notification_type')
+ ]
+ if notification_types:
+ results = [
+ item for item in results
+ if item.get('notification_type') in notification_types
+ ]
+
+ for metadata_key in ('approval_id', 'template_id'):
+ parameter_name = f'@metadata_{metadata_key}'
+ if parameter_name in parameter_map:
+ results = [
+ item for item in results
+ if item.get('metadata', {}).get(metadata_key) == parameter_map[parameter_name]
+ ]
+
+ return results
+
+
+class FakeApprovalsContainer:
+ """In-memory Cosmos-like container for approval documents."""
+
+ def __init__(self):
+ self.items = {}
+
+ def create_item(self, body=None, **kwargs):
+ item = body or kwargs.get('item')
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+ def upsert_item(self, item):
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+ def read_item(self, item=None, partition_key=None):
+ if item not in self.items:
+ raise exceptions.CosmosResourceNotFoundError(message='Approval not found')
+ stored = self.items[item]
+ if partition_key != stored.get('group_id'):
+ raise exceptions.CosmosResourceNotFoundError(message='Approval partition mismatch')
+ return copy.deepcopy(stored)
+
+
+class FakeTemplateContainer:
+ """In-memory Cosmos-like container for agent template documents."""
+
+ def __init__(self):
+ self.items = {}
+
+ def upsert_item(self, item):
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+ def read_item(self, item=None, partition_key=None):
+ if item not in self.items:
+ raise exceptions.CosmosResourceNotFoundError(message='Template not found')
+ stored = self.items[item]
+ if partition_key != stored.get('id'):
+ raise exceptions.CosmosResourceNotFoundError(message='Template partition mismatch')
+ return copy.deepcopy(stored)
+
+ def delete_item(self, item=None, partition_key=None):
+ if item not in self.items:
+ raise exceptions.CosmosResourceNotFoundError(message='Template not found')
+ del self.items[item]
+
+
+class FakeActivityLogsContainer:
+ """In-memory Cosmos-like container for activity log tests."""
+
+ def __init__(self):
+ self.items = {}
+
+ def create_item(self, body=None, **kwargs):
+ item = body or kwargs.get('item')
+ self.items[item['id']] = copy.deepcopy(item)
+ return copy.deepcopy(item)
+
+
+def _notification_types(container, metadata_key, metadata_value):
+ return sorted([
+ item.get('notification_type')
+ for item in container.items.values()
+ if item.get('metadata', {}).get(metadata_key) == metadata_value
+ ])
+
+
+def _notifications(container, metadata_key, metadata_value):
+ return [
+ copy.deepcopy(item)
+ for item in container.items.values()
+ if item.get('metadata', {}).get(metadata_key) == metadata_value
+ ]
+
+
+def _activity_logs(container, activity_type):
+ return [
+ copy.deepcopy(item)
+ for item in container.items.values()
+ if item.get('activity_type') == activity_type
+ ]
+
+
+def test_standard_approval_notifications_and_cleanup():
+ """Verify submitter/admin notifications and cleanup for standard approvals."""
+ print('🔍 Testing standard approval notification lifecycle...')
+
+ import functions_approvals
+ import functions_notifications
+
+ notification_container = FakeNotificationContainer()
+ approval_container = FakeApprovalsContainer()
+ group_doc = {
+ 'id': 'group-1',
+ 'name': 'Operations Group',
+ 'owner': {
+ 'id': 'group-owner-1',
+ 'email': 'owner@example.com',
+ 'displayName': 'Group Owner'
+ }
+ }
+
+ originals = {
+ 'notification_container': functions_notifications.cosmos_notifications_container,
+ 'approval_container': functions_approvals.cosmos_approvals_container,
+ 'find_group_by_id': functions_approvals.find_group_by_id,
+ 'approvals_log_event': functions_approvals.log_event,
+ }
+
+ functions_notifications.cosmos_notifications_container = notification_container
+ functions_approvals.cosmos_approvals_container = approval_container
+ functions_approvals.find_group_by_id = lambda group_id: copy.deepcopy(group_doc) if group_id == group_doc['id'] else None
+ functions_approvals.log_event = lambda *args, **kwargs: None
+
+ try:
+ approved_request = functions_approvals.create_approval_request(
+ request_type=functions_approvals.TYPE_DELETE_GROUP,
+ group_id='group-1',
+ requester_id='requester-1',
+ requester_email='requester@example.com',
+ requester_name='Requester One',
+ reason='Please remove this unused group.',
+ metadata={'entity_type': 'group'}
+ )
+
+ pending_types = _notification_types(notification_container, 'approval_id', approved_request['id'])
+ if pending_types.count('approval_request_pending') != 1 or pending_types.count('approval_request_pending_submitter') != 1:
+ print(f'❌ Unexpected pending notification set: {pending_types}')
+ return False
+
+ functions_approvals.approve_request(
+ approval_id=approved_request['id'],
+ group_id='group-1',
+ approver_id='admin-1',
+ approver_email='admin@example.com',
+ approver_name='Admin Reviewer',
+ comment='Looks good.'
+ )
+
+ approved_types = _notification_types(notification_container, 'approval_id', approved_request['id'])
+ if 'approval_request_pending' in approved_types:
+ print(f'❌ Pending admin notification was not cleared after approval: {approved_types}')
+ return False
+ if 'approval_request_approved' not in approved_types:
+ print(f'❌ Approved notification missing after approval: {approved_types}')
+ return False
+
+ denied_request = functions_approvals.create_approval_request(
+ request_type=functions_approvals.TYPE_DELETE_DOCUMENTS,
+ group_id='group-1',
+ requester_id='requester-2',
+ requester_email='requester2@example.com',
+ requester_name='Requester Two',
+ reason='Clear legacy documents.',
+ metadata={'entity_type': 'group'}
+ )
+
+ functions_approvals.deny_request(
+ approval_id=denied_request['id'],
+ group_id='group-1',
+ denier_id='admin-2',
+ denier_email='admin2@example.com',
+ denier_name='Admin Denier',
+ comment='Need more details.',
+ auto_denied=False
+ )
+
+ denied_types = _notification_types(notification_container, 'approval_id', denied_request['id'])
+ if 'approval_request_pending' in denied_types:
+ print(f'❌ Pending admin notification was not cleared after denial: {denied_types}')
+ return False
+ if 'approval_request_denied' not in denied_types:
+ print(f'❌ Denied notification missing after denial: {denied_types}')
+ return False
+
+ denied_notifications = _notifications(notification_container, 'approval_id', denied_request['id'])
+ denied_message = next(
+ (
+ item.get('message', '')
+ for item in denied_notifications
+ if item.get('notification_type') == 'approval_request_denied'
+ ),
+ ''
+ )
+ if 'Need more details.' not in denied_message:
+ print(f'❌ Denied approval notification did not include reason: {denied_message}')
+ return False
+
+ print('✅ Standard approvals notify submitters and clear reviewer pending notifications')
+ return True
+ finally:
+ functions_notifications.cosmos_notifications_container = originals['notification_container']
+ functions_approvals.cosmos_approvals_container = originals['approval_container']
+ functions_approvals.find_group_by_id = originals['find_group_by_id']
+ functions_approvals.log_event = originals['approvals_log_event']
+
+
+def test_agent_template_review_notifications_and_cleanup():
+ """Verify template review notifications for approve, reject, and delete paths."""
+ print('🔍 Testing agent template approval notification lifecycle...')
+
+ import functions_agent_templates
+ import functions_activity_logging
+ import functions_notifications
+
+ notification_container = FakeNotificationContainer()
+ template_container = FakeTemplateContainer()
+ activity_container = FakeActivityLogsContainer()
+
+ originals = {
+ 'notification_container': functions_notifications.cosmos_notifications_container,
+ 'template_container': functions_agent_templates.cosmos_agent_templates_container,
+ 'template_log_event': functions_agent_templates.log_event,
+ 'activity_container': functions_activity_logging.cosmos_activity_logs_container,
+ 'activity_log_event': functions_activity_logging.log_event,
+ }
+
+ functions_notifications.cosmos_notifications_container = notification_container
+ functions_agent_templates.cosmos_agent_templates_container = template_container
+ functions_agent_templates.log_event = lambda *args, **kwargs: None
+ functions_activity_logging.cosmos_activity_logs_container = activity_container
+ functions_activity_logging.log_event = lambda *args, **kwargs: None
+
+ submitter = {
+ 'userId': 'template-user-1',
+ 'email': 'template-user@example.com',
+ 'displayName': 'Template Submitter'
+ }
+ admin = {
+ 'userId': 'template-admin-1',
+ 'email': 'template-admin@example.com',
+ 'displayName': 'Template Admin'
+ }
+
+ try:
+ approved_template = functions_agent_templates.create_agent_template(
+ payload={
+ 'title': 'Agent One',
+ 'display_name': 'Agent One',
+ 'description': 'Test agent for approval.',
+ 'instructions': 'Always be helpful.',
+ 'source_scope': 'personal'
+ },
+ user_info=submitter,
+ auto_approve=False
+ )
+
+ initial_types = _notification_types(notification_container, 'template_id', approved_template['id'])
+ if initial_types.count('agent_template_pending_admin') != 1 or initial_types.count('agent_template_pending_submitter') != 1:
+ print(f'❌ Unexpected pending template notifications: {initial_types}')
+ return False
+ if not _activity_logs(activity_container, 'agent_template_submission'):
+ print('❌ Template submission activity log missing')
+ return False
+
+ functions_agent_templates.approve_agent_template(approved_template['id'], admin, notes='Approved for gallery.')
+ approved_types = _notification_types(notification_container, 'template_id', approved_template['id'])
+ if 'agent_template_pending_admin' in approved_types:
+ print(f'❌ Pending admin template notification not cleared after approval: {approved_types}')
+ return False
+ if 'agent_template_approved' not in approved_types:
+ print(f'❌ Approved template notification missing: {approved_types}')
+ return False
+ approval_logs = _activity_logs(activity_container, 'agent_template_approval')
+ if not approval_logs:
+ print('❌ Template approval activity log missing')
+ return False
+
+ rejected_template = functions_agent_templates.create_agent_template(
+ payload={
+ 'title': 'Agent Two',
+ 'display_name': 'Agent Two',
+ 'description': 'Test agent for rejection.',
+ 'instructions': 'Never leak secrets.',
+ 'source_scope': 'personal'
+ },
+ user_info=submitter,
+ auto_approve=False
+ )
+
+ functions_agent_templates.reject_agent_template(
+ rejected_template['id'],
+ admin,
+ reason='Needs clearer instructions.',
+ notes='Please simplify the prompt.'
+ )
+ rejected_types = _notification_types(notification_container, 'template_id', rejected_template['id'])
+ if 'agent_template_pending_admin' in rejected_types:
+ print(f'❌ Pending admin template notification not cleared after rejection: {rejected_types}')
+ return False
+ if 'agent_template_rejected' not in rejected_types:
+ print(f'❌ Rejected template notification missing: {rejected_types}')
+ return False
+
+ rejected_notifications = _notifications(notification_container, 'template_id', rejected_template['id'])
+ rejected_message = next(
+ (
+ item.get('message', '')
+ for item in rejected_notifications
+ if item.get('notification_type') == 'agent_template_rejected'
+ ),
+ ''
+ )
+ if 'Needs clearer instructions.' not in rejected_message:
+ print(f'❌ Rejected template notification did not include reason: {rejected_message}')
+ return False
+ rejection_logs = _activity_logs(activity_container, 'agent_template_rejection')
+ if not rejection_logs:
+ print('❌ Template rejection activity log missing')
+ return False
+ if rejection_logs[-1].get('review_reason') != 'Needs clearer instructions.':
+ print(f"❌ Template rejection activity log missing review reason: {rejection_logs[-1]}")
+ return False
+
+ deleted_template = functions_agent_templates.create_agent_template(
+ payload={
+ 'title': 'Agent Three',
+ 'display_name': 'Agent Three',
+ 'description': 'Test agent for deletion.',
+ 'instructions': 'Be concise.',
+ 'source_scope': 'personal'
+ },
+ user_info=submitter,
+ auto_approve=False
+ )
+
+ deleted = functions_agent_templates.delete_agent_template(deleted_template['id'], actor_info=admin)
+ if not deleted:
+ print('❌ Expected template deletion to succeed')
+ return False
+
+ deleted_types = _notification_types(notification_container, 'template_id', deleted_template['id'])
+ if 'agent_template_pending_admin' in deleted_types:
+ print(f'❌ Pending admin template notification not cleared after deletion: {deleted_types}')
+ return False
+ if 'agent_template_deleted' not in deleted_types:
+ print(f'❌ Deleted template notification missing: {deleted_types}')
+ return False
+ if not _activity_logs(activity_container, 'agent_template_deletion'):
+ print('❌ Template deletion activity log missing')
+ return False
+
+ print('✅ Agent template review notifications route to submitters and clear stale admin pending notices')
+ return True
+ finally:
+ functions_notifications.cosmos_notifications_container = originals['notification_container']
+ functions_agent_templates.cosmos_agent_templates_container = originals['template_container']
+ functions_agent_templates.log_event = originals['template_log_event']
+ functions_activity_logging.cosmos_activity_logs_container = originals['activity_container']
+ functions_activity_logging.log_event = originals['activity_log_event']
+
+
+def test_notification_display_backfills_rejection_reasons_from_metadata():
+ """Verify notification reads append reasons for older generic rejection messages."""
+ print('🔍 Testing notification display fallback for rejection reasons...')
+
+ import functions_notifications
+ import functions_group
+ import functions_public_workspaces
+
+ notification_container = FakeNotificationContainer()
+ notification_container.create_item({
+ 'id': 'approval-denied-legacy',
+ 'user_id': 'legacy-user',
+ 'group_id': None,
+ 'public_workspace_id': None,
+ 'scope': 'personal',
+ 'notification_type': 'approval_request_denied',
+ 'title': 'Request Denied',
+ 'message': 'Your request was denied by break glass.',
+ 'created_at': '2026-03-25T15:00:00+00:00',
+ 'ttl': 100,
+ 'read_by': [],
+ 'dismissed_by': [],
+ 'link_url': '/approvals',
+ 'link_context': {'approval_id': 'approval-legacy'},
+ 'metadata': {
+ 'approval_id': 'approval-legacy',
+ 'comment': 'Testing the rejection reason in notifications'
+ },
+ 'assignment': None
+ })
+ notification_container.create_item({
+ 'id': 'template-rejected-legacy',
+ 'user_id': 'legacy-user',
+ 'group_id': None,
+ 'public_workspace_id': None,
+ 'scope': 'personal',
+ 'notification_type': 'agent_template_rejected',
+ 'title': 'Template Declined: pa-gle',
+ 'message': "Your template 'pa-gle' was declined by break glass.",
+ 'created_at': '2026-03-25T15:01:00+00:00',
+ 'ttl': 100,
+ 'read_by': [],
+ 'dismissed_by': [],
+ 'link_url': '/workspace',
+ 'link_context': {'template_id': 'template-legacy'},
+ 'metadata': {
+ 'template_id': 'template-legacy',
+ 'rejection_reason': 'Testing the rejection reason in notifications'
+ },
+ 'assignment': None
+ })
+
+ originals = {
+ 'notification_container': functions_notifications.cosmos_notifications_container,
+ 'get_user_groups': functions_group.get_user_groups,
+ 'get_user_public_workspaces': functions_public_workspaces.get_user_public_workspaces,
+ }
+
+ functions_notifications.cosmos_notifications_container = notification_container
+ functions_group.get_user_groups = lambda user_id: []
+ functions_public_workspaces.get_user_public_workspaces = lambda user_id: []
+
+ try:
+ result = functions_notifications.get_user_notifications(
+ user_id='legacy-user',
+ page=1,
+ per_page=20,
+ include_read=True,
+ include_dismissed=False,
+ user_roles=[]
+ )
+ notifications = {item['id']: item for item in result['notifications']}
+
+ approval_message = notifications['approval-denied-legacy']['message']
+ if 'Testing the rejection reason in notifications' not in approval_message:
+ print(f'❌ Approval denial display message missing reason fallback: {approval_message}')
+ return False
+
+ template_message = notifications['template-rejected-legacy']['message']
+ if 'Testing the rejection reason in notifications' not in template_message:
+ print(f'❌ Template rejection display message missing reason fallback: {template_message}')
+ return False
+
+ print('✅ Notification display backfills rejection reasons from metadata')
+ return True
+ finally:
+ functions_notifications.cosmos_notifications_container = originals['notification_container']
+ functions_group.get_user_groups = originals['get_user_groups']
+ functions_public_workspaces.get_user_public_workspaces = originals['get_user_public_workspaces']
+
+
+if __name__ == '__main__':
+ tests = [
+ test_standard_approval_notifications_and_cleanup,
+ test_agent_template_review_notifications_and_cleanup,
+ test_notification_display_backfills_rejection_reasons_from_metadata,
+ ]
+
+ results = []
+ for test in tests:
+ print(f'\n🧪 Running {test.__name__}...')
+ results.append(test())
+
+ success = all(results)
+ print(f'\n📊 Results: {sum(results)}/{len(results)} tests passed')
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_azure_di_diagnostics.py b/functional_tests/test_azure_di_diagnostics.py
index d736f880..58b04cea 100644
--- a/functional_tests/test_azure_di_diagnostics.py
+++ b/functional_tests/test_azure_di_diagnostics.py
@@ -176,7 +176,7 @@ def test_azure_di_api_call():
try:
os.unlink(test_pdf_path)
print(f"✅ Cleaned up test file: {test_pdf_path}")
- except:
+            except Exception:
pass
except Exception as e:
diff --git a/functional_tests/test_backend_agents_swagger_integration.py b/functional_tests/test_backend_agents_swagger_integration.py
index 5bb4ed3e..d8f518d8 100644
--- a/functional_tests/test_backend_agents_swagger_integration.py
+++ b/functional_tests/test_backend_agents_swagger_integration.py
@@ -129,7 +129,7 @@ def test_backend_agents_swagger_integration():
security_count += 1
print(f" ✅ Security configured for authentication")
break
- except:
+                except Exception:
pass
if security_count > 0:
diff --git a/functional_tests/test_backend_chats_swagger_integration.py b/functional_tests/test_backend_chats_swagger_integration.py
index 9d18721e..60d22550 100644
--- a/functional_tests/test_backend_chats_swagger_integration.py
+++ b/functional_tests/test_backend_chats_swagger_integration.py
@@ -103,7 +103,7 @@ def test_backend_chats_swagger_integration():
security_count += 1
print(f" ✅ Security configured for authentication")
break
- except:
+                except Exception:
pass
if security_count > 0:
diff --git a/functional_tests/test_chat_model_description_tooltip.py b/functional_tests/test_chat_model_description_tooltip.py
new file mode 100644
index 00000000..b510302f
--- /dev/null
+++ b/functional_tests/test_chat_model_description_tooltip.py
@@ -0,0 +1,31 @@
+# test_chat_model_description_tooltip.py
+#!/usr/bin/env python3
+"""
+Functional test for chat model description tooltip.
+Version: 0.236.023
+Implemented in: 0.236.023
+
+This test ensures multi-endpoint model options include a title attribute
+that can display the model description on hover.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_chat_model_description_tooltip():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ template_path = os.path.join(repo_root, 'application', 'single_app', 'templates', 'chats.html')
+ content = read_file_text(template_path)
+
+ assert 'title="{{ model.description or model.display_name }}"' in content, "Model tooltip title missing in chat model select."
+
+ print("✅ Chat model description tooltip verified.")
+
+
+if __name__ == "__main__":
+ test_chat_model_description_tooltip()
diff --git a/functional_tests/test_chat_stream_background_execution.py b/functional_tests/test_chat_stream_background_execution.py
index 9ec9c8f6..c6263b05 100644
--- a/functional_tests/test_chat_stream_background_execution.py
+++ b/functional_tests/test_chat_stream_background_execution.py
@@ -2,12 +2,13 @@
# test_chat_stream_background_execution.py
"""
Functional test for chat stream background execution.
-Version: 0.239.143
+Version: 0.239.185
Implemented in: 0.239.129
This test ensures that the streaming chat route runs its SSE generator through
background execution so chat completion can continue after the browser leaves
-the page, while still streaming live events to an attached consumer.
+the page, while still streaming live events to an attached consumer and any
+later reattached consumer.
"""
import sys
@@ -26,7 +27,7 @@ def assert_contains(file_path: Path, expected: str) -> None:
raise AssertionError(f"Expected to find {expected!r} in {file_path}")
-def test_chat_stream_background_execution() -> bool:
+def test_chat_stream_background_execution() -> None:
print("Testing chat stream background execution...")
assert_contains(ROUTE_FILE, "class BackgroundStreamBridge:")
@@ -34,21 +35,24 @@ def test_chat_stream_background_execution() -> bool:
assert_contains(ROUTE_FILE, "executor = current_app.extensions.get('executor')")
assert_contains(ROUTE_FILE, "executor.submit(stream_worker)")
assert_contains(ROUTE_FILE, "worker_thread = threading.Thread(target=stream_worker, daemon=True)")
- assert_contains(ROUTE_FILE, "for event in event_generator_factory():")
+ assert_contains(ROUTE_FILE, "def publish_background_event(event_text):")
+ assert_contains(ROUTE_FILE, "event_iterator = event_generator_factory(")
+ assert_contains(ROUTE_FILE, "for event in event_iterator:")
assert_contains(ROUTE_FILE, "stream_bridge.detach_consumer()")
- assert_contains(ROUTE_FILE, "return build_background_stream_response(generate_compatibility_response)")
- assert_contains(ROUTE_FILE, "return build_background_stream_response(generate)")
+ assert_contains(ROUTE_FILE, "CHAT_STREAM_REGISTRY = ActiveConversationStreamRegistry()")
+ assert_contains(ROUTE_FILE, "return build_background_stream_response(generate_compatibility_response, stream_session=stream_session)")
+ assert_contains(ROUTE_FILE, "return build_background_stream_response(generate, stream_session=stream_session)")
- assert_contains(CONFIG_FILE, 'VERSION = "0.239.143"')
+ assert_contains(CONFIG_FILE, 'VERSION = "0.239.185"')
assert_contains(FIX_DOC_FILE, "Fixed/Implemented in version: **0.239.129**")
print("Chat stream background execution checks passed!")
- return True
if __name__ == "__main__":
try:
- success = test_chat_stream_background_execution()
+ test_chat_stream_background_execution()
+ success = True
except Exception as exc:
print(f"Test failed: {exc}")
import traceback
diff --git a/functional_tests/test_chat_stream_compatibility_sse_syntax.py b/functional_tests/test_chat_stream_compatibility_sse_syntax.py
index c49bd45d..6474c132 100644
--- a/functional_tests/test_chat_stream_compatibility_sse_syntax.py
+++ b/functional_tests/test_chat_stream_compatibility_sse_syntax.py
@@ -2,7 +2,7 @@
# test_chat_stream_compatibility_sse_syntax.py
"""
Functional test for chat stream compatibility SSE syntax.
-Version: 0.239.143
+Version: 0.239.185
Implemented in: 0.239.134
This test ensures that the streaming chat route compiles successfully and that
@@ -26,7 +26,7 @@ def assert_contains(file_path: Path, expected: str) -> None:
raise AssertionError(f"Expected to find {expected!r} in {file_path}")
-def test_chat_stream_compatibility_sse_syntax() -> bool:
+def test_chat_stream_compatibility_sse_syntax() -> None:
print("Testing chat stream compatibility SSE syntax...")
source = ROUTE_FILE.read_text(encoding="utf-8")
@@ -36,16 +36,16 @@ def test_chat_stream_compatibility_sse_syntax() -> bool:
assert_contains(ROUTE_FILE, "image_request_event = {")
assert_contains(ROUTE_FILE, "image_ready_event = {")
assert_contains(ROUTE_FILE, 'yield f"data: {json.dumps(image_prompt_event)}\\n\\n"')
- assert_contains(CONFIG_FILE, 'VERSION = "0.239.143"')
+ assert_contains(CONFIG_FILE, 'VERSION = "0.239.185"')
assert_contains(FIX_DOC_FILE, "Fixed/Implemented in version: **0.239.134**")
print("Chat stream compatibility SSE syntax checks passed!")
- return True
if __name__ == "__main__":
try:
- success = test_chat_stream_compatibility_sse_syntax()
+ test_chat_stream_compatibility_sse_syntax()
+ success = True
except Exception as exc:
print(f"Test failed: {exc}")
import traceback
diff --git a/functional_tests/test_chat_stream_debug_logging.py b/functional_tests/test_chat_stream_debug_logging.py
index 396bb774..c28fced0 100644
--- a/functional_tests/test_chat_stream_debug_logging.py
+++ b/functional_tests/test_chat_stream_debug_logging.py
@@ -2,7 +2,7 @@
# test_chat_stream_debug_logging.py
"""
Functional test for chat stream debug logging.
-Version: 0.239.143
+Version: 0.239.185
Implemented in: 0.239.142
This test ensures that the streaming chat route retains unconditional
@@ -25,7 +25,7 @@ def assert_contains(file_path: Path, expected: str) -> None:
raise AssertionError(f"Expected to find {expected!r} in {file_path}")
-def test_chat_stream_debug_logging() -> bool:
+def test_chat_stream_debug_logging() -> None:
print("Testing chat stream debug logging markers...")
assert_contains(ROUTE_FILE, '[Streaming] Incoming /api/chat/stream request | ')
@@ -34,18 +34,17 @@ def test_chat_stream_debug_logging() -> bool:
assert_contains(ROUTE_FILE, '[Streaming] Cleared plugin invocations for user_id=')
assert_contains(ROUTE_FILE, '[Streaming] Selected response path | ')
assert_contains(ROUTE_FILE, '[Streaming][Plugin Callback] Registering callback for key=')
- assert_contains(ROUTE_FILE, '[Streaming][Plugin Callback] Received invocation ')
assert_contains(ROUTE_FILE, '[Streaming][Plugin Callback] Deregistered callback after successful stream for key=')
assert_contains(ROUTE_FILE, '[Streaming] Finalizing stream response | ')
- assert_contains(CONFIG_FILE, 'VERSION = "0.239.143"')
+ assert_contains(CONFIG_FILE, 'VERSION = "0.239.185"')
print("Chat stream debug logging checks passed!")
- return True
if __name__ == "__main__":
try:
- success = test_chat_stream_debug_logging()
+ test_chat_stream_debug_logging()
+ success = True
except Exception as exc:
print(f"Test failed: {exc}")
import traceback
diff --git a/functional_tests/test_chat_stream_heartbeat_reattach.py b/functional_tests/test_chat_stream_heartbeat_reattach.py
new file mode 100644
index 00000000..417f0239
--- /dev/null
+++ b/functional_tests/test_chat_stream_heartbeat_reattach.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+# test_chat_stream_heartbeat_reattach.py
+"""
+Functional test for chat stream heartbeat and reattach support.
+Version: 0.239.185
+Implemented in: 0.239.183
+
+This test ensures long-running chat streams emit keep-alive heartbeat frames,
+register replayable in-flight sessions for reconnecting consumers through the
+shared app cache, and that the chat UI attempts to reattach when a user
+reopens an active conversation.
+"""
+
+import sys
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+ROUTE_FILE = ROOT / "application" / "single_app" / "route_backend_chats.py"
+APP_CACHE_FILE = ROOT / "application" / "single_app" / "app_settings_cache.py"
+STREAMING_FILE = ROOT / "application" / "single_app" / "static" / "js" / "chat" / "chat-streaming.js"
+CONVERSATIONS_FILE = ROOT / "application" / "single_app" / "static" / "js" / "chat" / "chat-conversations.js"
+CONFIG_FILE = ROOT / "application" / "single_app" / "config.py"
+FIX_DOC_FILE = ROOT / "docs" / "explanation" / "fixes" / "CHAT_STREAM_HEARTBEAT_REATTACH_FIX.md"
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def assert_not_contains(file_path: Path, forbidden: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if forbidden in content:
+ raise AssertionError(f"Did not expect to find {forbidden!r} in {file_path}")
+
+
+def test_chat_stream_heartbeat_and_reattach() -> None:
+ print("Testing chat stream heartbeat and reattach support...")
+
+ assert_contains(ROUTE_FILE, "yield ': keep-alive\\n\\n'")
+ assert_contains(ROUTE_FILE, "class ActiveConversationStreamSession:")
+ assert_contains(ROUTE_FILE, "CHAT_STREAM_REGISTRY = ActiveConversationStreamRegistry()")
+ assert_contains(ROUTE_FILE, "@app.route('/api/chat/stream/status/', methods=['GET'])")
+ assert_contains(ROUTE_FILE, "@app.route('/api/chat/stream/reattach/', methods=['GET'])")
+ assert_contains(ROUTE_FILE, "stream_with_context(stream_session.iter_events())")
+ assert_contains(ROUTE_FILE, "import app_settings_cache")
+ assert_contains(ROUTE_FILE, "app_settings_cache.initialize_stream_session_cache(")
+ assert_contains(ROUTE_FILE, "app_settings_cache.append_stream_session_event(")
+ assert_contains(ROUTE_FILE, "app_settings_cache.get_stream_session_events(")
+
+ assert_contains(APP_CACHE_FILE, "APP_STREAM_SESSION_METADATA = {}")
+ assert_contains(APP_CACHE_FILE, "APP_STREAM_SESSION_EVENTS = {}")
+ assert_contains(APP_CACHE_FILE, "def initialize_stream_session_cache_redis(cache_key, metadata, ttl_seconds=None):")
+ assert_contains(APP_CACHE_FILE, "def append_stream_session_event_redis(cache_key, event_text, ttl_seconds=None):")
+ assert_contains(APP_CACHE_FILE, "def get_stream_session_events_mem(cache_key, start_index=0):")
+
+ assert_contains(STREAMING_FILE, "export async function reattachStreamingConversation(conversationId)")
+ assert_contains(STREAMING_FILE, "fetch(`/api/chat/stream/status/${conversationId}`")
+ assert_contains(STREAMING_FILE, "fetch(`/api/chat/stream/reattach/${conversationId}`")
+ assert_not_contains(STREAMING_FILE, "5 * 60 * 1000")
+
+ assert_contains(CONVERSATIONS_FILE, "await loadMessages(conversationId);")
+ assert_contains(CONVERSATIONS_FILE, "await streamingModule.reattachStreamingConversation(conversationId);")
+
+ assert_contains(CONFIG_FILE, 'VERSION = "0.239.185"')
+ assert_contains(FIX_DOC_FILE, "Fixed/Implemented in version: **0.239.183**")
+ assert_contains(FIX_DOC_FILE, "Redis-backed session metadata and event replay")
+
+ print("Chat stream heartbeat and reattach checks passed!")
+
+
+if __name__ == "__main__":
+ try:
+ test_chat_stream_heartbeat_and_reattach()
+ success = True
+ except Exception as exc:
+ print(f"Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ success = False
+
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py b/functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py
new file mode 100644
index 00000000..25b2b4e4
--- /dev/null
+++ b/functional_tests/test_chat_tagging_and_endpoint_provider_visibility.py
@@ -0,0 +1,105 @@
+# test_chat_tagging_and_endpoint_provider_visibility.py
+#!/usr/bin/env python3
+"""
+Functional test for chat tagging and endpoint provider visibility.
+Version: 0.239.177
+Implemented in: 0.239.177
+
+This test ensures supported New Foundry endpoints are exposed to user-facing
+endpoint payloads, streaming group-agent conversations preserve group
+metadata, personal conversations no longer render visible tags, the active
+conversation header shows the full group name, and the sidebar shows the first
+8 characters of the group name.
+"""
+
+import os
+import sys
+
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_text(relative_path):
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ with open(os.path.join(repo_root, relative_path), "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def test_endpoint_provider_visibility_guard() -> None:
+ """Verify supported endpoint providers are exposed to frontend flows."""
+ print("🔍 Testing endpoint provider visibility guard...")
+
+ try:
+ settings_text = _read_text("application/single_app/functions_settings.py")
+
+ required_snippets = [
+ "def is_frontend_visible_model_endpoint_provider(provider):",
+ 'return normalized_provider in {"aoai", "aifoundry", "new_foundry"}',
+ 'if not is_frontend_visible_model_endpoint_provider(endpoint.get("provider")):',
+ 'if is_frontend_visible_model_endpoint_provider(endpoint.get("provider")):',
+ ]
+
+ missing = [snippet for snippet in required_snippets if snippet not in settings_text]
+ if missing:
+ raise AssertionError(f"Missing endpoint visibility snippets: {', '.join(missing)}")
+
+ print("✅ Frontend endpoint provider filtering is wired")
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ raise
+
+
+def test_group_agent_tagging_and_personal_badge_removal() -> None:
+ """Verify group-agent metadata survives streaming and UI tags render correctly."""
+ print("🔍 Testing group-agent tagging and personal badge removal...")
+
+ try:
+ chat_conversations_text = _read_text("application/single_app/static/js/chat/chat-conversations.js")
+ chat_details_text = _read_text("application/single_app/static/js/chat/chat-conversation-details.js")
+ chat_sidebar_text = _read_text("application/single_app/static/js/chat/chat-sidebar-conversations.js")
+ chat_backend_text = _read_text("application/single_app/route_backend_chats.py")
+
+ required_snippets = [
+ 'selected_agent=agent_name_used if use_agent_streaming else None',
+ 'selected_agent_details=selected_agent_metadata if use_agent_streaming else None',
+ 'return normalizedName.slice(0, 8);',
+ "groupBadge.textContent = (groupName || 'group').trim() || 'group';",
+ "badge.textContent = getShortGroupLabel(groupName);",
+ 'return `${escapeHtml(groupName)}`;',
+ "return 'personal';",
+ ]
+ missing = [
+ snippet
+ for snippet in required_snippets
+ if snippet not in f"{chat_conversations_text}\n{chat_details_text}\n{chat_sidebar_text}\n{chat_backend_text}"
+ ]
+ if missing:
+ raise AssertionError(f"Missing tagging snippets: {', '.join(missing)}")
+
+ if 'badge.textContent = \'personal\'' in chat_sidebar_text:
+ raise AssertionError("Personal conversations still render a visible personal badge in the sidebar")
+
+ print("✅ Group-agent metadata and conversation tag rendering are wired correctly")
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ raise
+
+
+if __name__ == "__main__":
+ tests = [
+ test_endpoint_provider_visibility_guard,
+ test_group_agent_tagging_and_personal_badge_removal,
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+        test()
+        results.append(True)
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(tests)} tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_chat_toolbar_layout.py b/functional_tests/test_chat_toolbar_layout.py
new file mode 100644
index 00000000..f377f1d3
--- /dev/null
+++ b/functional_tests/test_chat_toolbar_layout.py
@@ -0,0 +1,76 @@
+# test_chat_toolbar_layout.py
+#!/usr/bin/env python3
+"""
+Functional test for chat toolbar layout separation.
+Version: 0.239.170
+Implemented in: 0.239.170
+
+This test ensures the chat selectors and toggle buttons remain separate sibling
+groups, stay aligned on wide layouts, and switch to clean full-width toolbar
+rows before overlap occurs on narrower layouts.
+"""
+
+import os
+import re
+import sys
+
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_text(relative_path):
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ with open(os.path.join(repo_root, relative_path), "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def test_chat_toolbar_groups_are_responsive_without_overlap():
+ """Verify the toolbar layout has both desktop and medium-width guardrails."""
+ print("🔍 Testing chat toolbar group alignment...")
+
+ try:
+ template_text = _read_text("application/single_app/templates/chats.html")
+ css_text = _read_text("application/single_app/static/css/chats.css")
+
+ structure_pattern = re.compile(
+ r'
.*?
.*?
\s*
',
+ re.DOTALL,
+ )
+
+ if not structure_pattern.search(template_text):
+ print("❌ Chat toolbar selectors and toggles are not separate sibling groups")
+ return False
+
+ required_css_snippets = [
+ ".chat-toolbar {",
+ "flex-wrap: nowrap;",
+ "align-items: flex-end;",
+ ".chat-toolbar-controls {",
+ "flex-wrap: nowrap;",
+ "align-items: flex-end;",
+ ".chat-toolbar-toggles {",
+ ".chat-toolbar-selectors {",
+ "@media (max-width: 1200px) {",
+ "flex: 1 1 100%;",
+ "justify-content: flex-start;",
+ "@media (max-width: 768px) {",
+ ]
+
+ missing_css = [snippet for snippet in required_css_snippets if snippet not in css_text]
+ if missing_css:
+ print(f"❌ Missing toolbar layout CSS snippets: {', '.join(missing_css)}")
+ return False
+
+ print("✅ Chat toolbar selectors and toggles remain aligned without medium-width overlap")
+ return True
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+if __name__ == "__main__":
+ success = test_chat_toolbar_groups_are_responsive_without_overlap()
+ print(f"\n📊 Results: {1 if success else 0}/1 tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_chat_type_normalization.py b/functional_tests/test_chat_type_normalization.py
new file mode 100644
index 00000000..dec83b6e
--- /dev/null
+++ b/functional_tests/test_chat_type_normalization.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+"""
+Functional test for chat type normalization.
+Version: 0.236.065
+Implemented in: 0.236.065
+
+This test ensures new conversations are marked with chat_type "new" and
+personal conversations normalize to "personal_single_user" across the UI
+and backend metadata paths.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_file(path):
+ with open(path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def _assert_contains(text, snippet, file_label):
+ if snippet not in text:
+ raise AssertionError(f"Missing '{snippet}' in {file_label}")
+
+
+def test_chat_type_normalization():
+ """Verify chat_type normalization is wired across UI and backend."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+
+ js_conversations_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-conversations.js",
+ )
+ js_details_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-conversation-details.js",
+ )
+ backend_conversations_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "route_backend_conversations.py",
+ )
+ backend_chats_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "route_backend_chats.py",
+ )
+ metadata_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "functions_conversation_metadata.py",
+ )
+
+ js_conversations = _read_file(js_conversations_path)
+ js_details = _read_file(js_details_path)
+ backend_conversations = _read_file(backend_conversations_path)
+ backend_chats = _read_file(backend_chats_path)
+ metadata = _read_file(metadata_path)
+
+ _assert_contains(js_conversations, 'chat_type: "new"', "chat-conversations.js")
+ _assert_contains(js_conversations, 'personal_single_user', "chat-conversations.js")
+ _assert_contains(js_details, 'personal_single_user', "chat-conversation-details.js")
+ _assert_contains(backend_conversations, "personal_single_user", "route_backend_conversations.py")
+ _assert_contains(backend_chats, "personal_single_user", "route_backend_chats.py")
+ _assert_contains(metadata, "personal_single_user", "functions_conversation_metadata.py")
+
+ print("✅ Chat type normalization verified")
+ return True
+
+
+if __name__ == "__main__":
+ success = test_chat_type_normalization()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_control_center_token_filters.py b/functional_tests/test_control_center_token_filters.py
new file mode 100644
index 00000000..fd01fd92
--- /dev/null
+++ b/functional_tests/test_control_center_token_filters.py
@@ -0,0 +1,133 @@
+# test_control_center_token_filters.py
+"""
+Functional test for Control Center token filters.
+Version: 0.239.164
+Implemented in: 0.239.164
+
+This test ensures that the Control Center token filters are wired through the
+backend APIs, dashboard template, and client-side request handling.
+"""
+
+from pathlib import Path
+import sys
+
+
+ROOT = Path(__file__).resolve().parents[1]
+APP_DIR = ROOT / "application" / "single_app"
+
+if str(APP_DIR) not in sys.path:
+ sys.path.insert(0, str(APP_DIR))
+
+
+def read_text(relative_path: str) -> str:
+ """Read a repository file as UTF-8 text."""
+ return (ROOT / relative_path).read_text(encoding="utf-8")
+
+
+def test_backend_token_filter_routes_and_helpers_present() -> bool:
+ """Validate the control center backend exposes token filter support."""
+ print("Testing control center token filter backend wiring...")
+ backend_content = read_text("application/single_app/route_backend_control_center.py")
+
+ required_snippets = [
+ "/api/admin/control-center/token-filters",
+ "def extract_token_filters(source):",
+ "def append_token_usage_filters(query_conditions, parameters, token_filters):",
+ "def build_token_usage_query_context(start_date, end_date, token_filters=None):",
+ "token_filters = extract_token_filters(request.args)",
+ "token_filters = extract_token_filters(data)",
+ "c.usage.model = @token_model",
+ "c.workspace_context.group_id = @token_group_id",
+ "c.workspace_context.public_workspace_id = @token_public_workspace_id"
+ ]
+
+ for snippet in required_snippets:
+ if snippet not in backend_content:
+ print(f"Missing backend snippet: {snippet}")
+ return False
+
+ print("Backend token filter support found.")
+ return True
+
+
+def test_control_center_template_contains_token_filter_controls() -> bool:
+ """Validate the dashboard template includes token filter controls."""
+ print("Testing control center token filter template controls...")
+ template_content = read_text("application/single_app/templates/control_center.html")
+
+ required_ids = [
+ 'id="tokenUserFilter"',
+ 'id="tokenWorkspaceTypeFilter"',
+ 'id="tokenGroupFilter"',
+ 'id="tokenPublicWorkspaceFilter"',
+ 'id="tokenModelFilter"',
+ 'id="tokenTypeFilter"',
+ 'id="tokenApplyFiltersBtn"',
+ 'id="tokenResetFiltersBtn"'
+ ]
+
+ for element_id in required_ids:
+ if element_id not in template_content:
+ print(f"Missing template element: {element_id}")
+ return False
+
+ print("Template token filter controls found.")
+ return True
+
+
+def test_control_center_javascript_wires_token_filter_requests() -> bool:
+ """Validate the client script loads and forwards token filters."""
+ print("Testing control center token filter JavaScript wiring...")
+ js_content = read_text("application/single_app/static/js/control-center.js")
+
+ required_snippets = [
+ "this.tokenFilters = this.getDefaultTokenFilters();",
+ "loadTokenFilterOptions()",
+ "getTokenFilterRequestPayload()",
+ "applyTokenFilters()",
+ "resetTokenFilters()",
+ "syncTokenFiltersFromControls()",
+ "params.append(key, value);",
+ "exportData.token_filters = tokenFilters;",
+ "chatData.token_filters = tokenFilters;",
+ "'/api/admin/control-center/token-filters'"
+ ]
+
+ for snippet in required_snippets:
+ if snippet not in js_content:
+ print(f"Missing JavaScript snippet: {snippet}")
+ return False
+
+ print("JavaScript token filter wiring found.")
+ return True
+
+
+def test_config_version_bumped_for_token_filters() -> bool:
+ """Validate the repository version bump for the feature."""
+ print("Testing config version bump...")
+ config_content = read_text("application/single_app/config.py")
+
+ if 'VERSION = "0.239.164"' not in config_content:
+ print("Config version was not bumped to 0.239.164")
+ return False
+
+ print("Config version bump found.")
+ return True
+
+
+if __name__ == "__main__":
+ checks = [
+ test_backend_token_filter_routes_and_helpers_present,
+ test_control_center_template_contains_token_filter_controls,
+ test_control_center_javascript_wires_token_filter_requests,
+ test_config_version_bumped_for_token_filters,
+ ]
+
+ results = []
+ for check in checks:
+ print(f"\nRunning {check.__name__}...")
+ results.append(check())
+
+ success = all(results)
+ print(f"\nResults: {sum(results)}/{len(results)} checks passed")
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_conversation_metrics.py b/functional_tests/test_conversation_metrics.py
index ad53b9a2..ecce9161 100644
--- a/functional_tests/test_conversation_metrics.py
+++ b/functional_tests/test_conversation_metrics.py
@@ -61,7 +61,7 @@ def test_conversation_metrics_structure():
try:
date_obj = datetime.fromisoformat(last_updated.replace('Z', '+00:00'))
last_day_conversation = date_obj.strftime('%m/%d/%Y')
- except:
+        except Exception:
last_day_conversation = 'Invalid date'
print(f" - Most Recent Conversation: {last_day_conversation}")
@@ -126,7 +126,7 @@ def test_date_formatting():
try:
date_obj = datetime.fromisoformat(test_date.replace('Z', '+00:00'))
result = date_obj.strftime('%m/%d/%Y')
- except:
+        except Exception:
result = 'Invalid date'
print(f" Result: {result}")
diff --git a/functional_tests/test_custom_endpoint_settings_migration.py b/functional_tests/test_custom_endpoint_settings_migration.py
new file mode 100644
index 00000000..1c8fde52
--- /dev/null
+++ b/functional_tests/test_custom_endpoint_settings_migration.py
@@ -0,0 +1,87 @@
+# test_custom_endpoint_settings_migration.py
+"""
+Functional test for custom endpoint settings migration.
+Version: 0.236.058
+Implemented in: 0.236.058
+
+This test ensures legacy custom endpoint settings are migrated to the new
+workspace-scoped flags and kept in sync.
+"""
+
+import os
+import sys
+
+repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.append(repo_root)
+
+from application.single_app.functions_settings import apply_custom_endpoint_setting_migration
+
+
+def test_custom_endpoint_settings_migration_from_legacy():
+ """Ensure legacy custom endpoint flags migrate to new settings."""
+ print("🔍 Validating custom endpoint settings migration...")
+
+ settings = {
+ "allow_user_custom_agent_endpoints": True,
+ "allow_group_custom_agent_endpoints": False
+ }
+
+ updated = apply_custom_endpoint_setting_migration(settings)
+
+ assert updated is True
+ assert settings["allow_user_custom_endpoints"] is True
+ assert settings["allow_group_custom_endpoints"] is False
+ assert settings["allow_user_custom_agent_endpoints"] is True
+ assert settings["allow_group_custom_agent_endpoints"] is False
+
+ print("✅ Custom endpoint settings migration passed.")
+
+
+def test_custom_endpoint_settings_migration_syncs_legacy():
+ """Ensure legacy flags stay in sync with new settings."""
+ print("🔍 Validating legacy flag sync for custom endpoints...")
+
+ settings = {
+ "allow_user_custom_endpoints": False,
+ "allow_group_custom_endpoints": True,
+ "allow_user_custom_agent_endpoints": True,
+ "allow_group_custom_agent_endpoints": False
+ }
+
+ updated = apply_custom_endpoint_setting_migration(settings)
+
+ assert updated is True
+ assert settings["allow_user_custom_agent_endpoints"] is False
+ assert settings["allow_group_custom_agent_endpoints"] is True
+
+ print("✅ Legacy flag sync passed.")
+
+
+def run_tests():
+ tests = [
+ test_custom_endpoint_settings_migration_from_legacy,
+ test_custom_endpoint_settings_migration_syncs_legacy
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ success = run_tests()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_default_model_selection_fallback.py b/functional_tests/test_default_model_selection_fallback.py
new file mode 100644
index 00000000..c6558632
--- /dev/null
+++ b/functional_tests/test_default_model_selection_fallback.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# test_default_model_selection_fallback.py
+"""
+Functional test for default model selection fallback.
+Version: 0.236.053
+Implemented in: 0.236.053
+
+This test ensures default model selection is surfaced in admin settings
+and used for fallback GPT initialization when agent requests omit model info.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_default_model_selection_wiring():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ admin_template_path = os.path.join(
+ repo_root, "application", "single_app", "templates", "admin_settings.html"
+ )
+ admin_route_path = os.path.join(
+ repo_root, "application", "single_app", "route_frontend_admin_settings.py"
+ )
+ chat_path = os.path.join(
+ repo_root, "application", "single_app", "route_backend_chats.py"
+ )
+
+ admin_template = read_file_text(admin_template_path)
+ admin_route = read_file_text(admin_route_path)
+ chat_route = read_file_text(chat_path)
+
+ assert "default_model_selection_json" in admin_template, (
+ "Expected default model selection input in admin settings template."
+ )
+ assert "default-model-selection" in admin_template, (
+ "Expected default model selection dropdown in admin settings template."
+ )
+ assert "default_model_selection" in admin_route, (
+ "Expected default model selection to be handled in admin settings save."
+ )
+ assert "resolve_default_model_gpt_config" in chat_route, (
+ "Expected default model fallback logic in chat route."
+ )
+
+ print("✅ Default model selection wiring verified.")
+
+
+def run_tests():
+ tests = [test_default_model_selection_wiring]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_document_metadata_update_activity_logging.py b/functional_tests/test_document_metadata_update_activity_logging.py
index 16af038a..f29768f5 100644
--- a/functional_tests/test_document_metadata_update_activity_logging.py
+++ b/functional_tests/test_document_metadata_update_activity_logging.py
@@ -220,7 +220,7 @@ def test_metadata_update_logging():
partition_key=test_user_id
)
print("✓ Test document deleted")
- except:
+ except Exception as ex:
print("⚠️ Test document already deleted or not found")
# Delete all activity logs for test user
@@ -230,7 +230,7 @@ def test_metadata_update_logging():
item=log['id'],
partition_key=log['user_id']
)
- except:
+ except Exception as ex:
pass
print("✓ Activity logs cleaned up")
diff --git a/functional_tests/test_document_metrics_database_queries.py b/functional_tests/test_document_metrics_database_queries.py
index c661a751..b5fba265 100644
--- a/functional_tests/test_document_metrics_database_queries.py
+++ b/functional_tests/test_document_metrics_database_queries.py
@@ -85,10 +85,10 @@ def test_document_metrics_queries():
# Try different date formats
try:
dt = datetime.fromisoformat(last_updated.replace('Z', '+00:00'))
- except:
+ except Exception as ex:
try:
dt = datetime.strptime(last_updated, '%Y-%m-%d')
- except:
+ except Exception as ex:
dt = datetime.strptime(last_updated, '%Y-%m-%dT%H:%M:%S')
else:
dt = last_updated
diff --git a/functional_tests/test_document_metrics_implementation_verification.py b/functional_tests/test_document_metrics_implementation_verification.py
index e8583041..a7c2a303 100644
--- a/functional_tests/test_document_metrics_implementation_verification.py
+++ b/functional_tests/test_document_metrics_implementation_verification.py
@@ -147,10 +147,10 @@ def validate_date_format():
# Try different date formats
try:
dt = datetime.fromisoformat(test_date.replace('Z', '+00:00'))
- except:
+ except Exception as ex:
try:
dt = datetime.strptime(test_date, '%Y-%m-%d')
- except:
+ except Exception as ex:
dt = datetime.strptime(test_date, '%Y-%m-%dT%H:%M:%S')
formatted = dt.strftime('%m/%d/%Y')
diff --git a/functional_tests/test_document_upload_fix.py b/functional_tests/test_document_upload_fix.py
index e97f2be1..e38d13f7 100644
--- a/functional_tests/test_document_upload_fix.py
+++ b/functional_tests/test_document_upload_fix.py
@@ -122,7 +122,7 @@ def test_azure_di_api_parameters():
# Clean up temp file
try:
os.unlink(temp_file_path)
- except:
+ except Exception as ex:
pass
except ImportError as e:
diff --git a/functional_tests/test_document_upload_traceback_shadow_fix.py b/functional_tests/test_document_upload_traceback_shadow_fix.py
new file mode 100644
index 00000000..6283a441
--- /dev/null
+++ b/functional_tests/test_document_upload_traceback_shadow_fix.py
@@ -0,0 +1,121 @@
+# test_document_upload_traceback_shadow_fix.py
+"""
+Functional test for document upload traceback shadowing fix.
+Version: 0.239.165
+Implemented in: 0.239.165
+
+This test ensures PDF and DOCX upload processing no longer shadows the traceback
+module inside process_di_document, which previously caused upload failures when
+exception handling tried to call traceback.format_exc().
+"""
+
+import ast
+import os
+import sys
+
+
+FUNCTIONS_DOCUMENTS_PATH = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "..",
+ "application",
+ "single_app",
+ "functions_documents.py",
+)
+
+
+def _load_module_ast():
+ with open(FUNCTIONS_DOCUMENTS_PATH, "r", encoding="utf-8") as source_file:
+ return ast.parse(source_file.read(), filename=FUNCTIONS_DOCUMENTS_PATH)
+
+
+def test_traceback_import_is_module_scoped():
+ """Verify traceback is imported at module scope for functions_documents."""
+ print("🔍 Checking module-level traceback import...")
+
+ try:
+ module_ast = _load_module_ast()
+ has_module_import = any(
+ isinstance(node, ast.Import)
+ and any(alias.name == "traceback" for alias in node.names)
+ for node in module_ast.body
+ )
+
+ if not has_module_import:
+ print("❌ Module-level 'import traceback' not found")
+ return False
+
+ print("✅ Module-level 'import traceback' found")
+ return True
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+        import traceback
+        traceback.print_exc()
+ return False
+
+
+def test_process_di_document_has_no_local_traceback_import():
+ """Verify process_di_document no longer defines traceback as a local name."""
+ print("🔍 Checking process_di_document for local traceback imports...")
+
+ try:
+ module_ast = _load_module_ast()
+ process_di_document_node = next(
+ (
+ node
+ for node in module_ast.body
+ if isinstance(node, ast.FunctionDef) and node.name == "process_di_document"
+ ),
+ None,
+ )
+
+ if process_di_document_node is None:
+ print("❌ process_di_document function not found")
+ return False
+
+ local_traceback_imports = [
+ node
+ for node in ast.walk(process_di_document_node)
+ if isinstance(node, ast.Import)
+ and any(alias.name == "traceback" for alias in node.names)
+ ]
+
+ if local_traceback_imports:
+ print("❌ Found function-local 'import traceback' in process_di_document")
+ return False
+
+ uses_traceback_format_exc = any(
+ isinstance(node, ast.Call)
+ and isinstance(node.func, ast.Attribute)
+ and isinstance(node.func.value, ast.Name)
+ and node.func.value.id == "traceback"
+ and node.func.attr == "format_exc"
+ for node in ast.walk(process_di_document_node)
+ )
+
+ if not uses_traceback_format_exc:
+ print("❌ process_di_document no longer calls traceback.format_exc(); expected regression guard missing")
+ return False
+
+ print("✅ process_di_document uses traceback.format_exc() without local shadowing")
+ return True
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+        import traceback
+        traceback.print_exc()
+ return False
+
+
+if __name__ == "__main__":
+ tests = [
+ test_traceback_import_is_module_scoped,
+ test_process_di_document_has_no_local_traceback_import,
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ results.append(test())
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_dual_foundry_agent_support.py b/functional_tests/test_dual_foundry_agent_support.py
new file mode 100644
index 00000000..331fef30
--- /dev/null
+++ b/functional_tests/test_dual_foundry_agent_support.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+# test_dual_foundry_agent_support.py
+"""
+Functional test for dual Foundry agent support.
+Version: 0.239.154
+Implemented in: 0.239.154
+
+This test ensures that classic Foundry and new Foundry agent payloads both
+validate through the backend sanitizer, preserve separate settings, and that
+runtime/modal code paths include explicit support for the new_foundry type.
+"""
+
+import os
+import sys
+
+REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+APP_ROOT = os.path.join(REPO_ROOT, "application", "single_app")
+
+sys.path.append(APP_ROOT)
+
+from functions_agent_payload import AgentPayloadError, sanitize_agent_payload
+from json_schema_validation import validate_agent
+
+
+def read_file_text(*relative_parts):
+ file_path = os.path.join(REPO_ROOT, *relative_parts)
+ with open(file_path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def test_classic_foundry_payload_still_validates():
+ """Classic Foundry payloads should remain valid and isolated."""
+ print("🔍 Testing classic Foundry payload validation...")
+
+ payload = {
+ "id": "123e4567-e89b-12d3-a456-426614174000",
+ "name": "classic_foundry_agent",
+ "display_name": "Classic Foundry Agent",
+ "description": "Classic Foundry path",
+ "instructions": "Managed in Foundry",
+ "agent_type": "aifoundry",
+ "actions_to_load": ["pluginA"],
+ "enable_agent_gpt_apim": True,
+ "azure_openai_gpt_endpoint": "https://example.services.ai.azure.com",
+ "azure_openai_gpt_deployment": "sc-aifoundry",
+ "azure_openai_gpt_api_version": "v1",
+ "other_settings": {
+ "azure_ai_foundry": {
+ "agent_id": "asst_123",
+ "endpoint_id": "classic-endpoint"
+ }
+ },
+ "max_completion_tokens": 4096,
+ }
+
+ cleaned = sanitize_agent_payload(payload)
+ validation_error = validate_agent(cleaned)
+
+ assert cleaned["agent_type"] == "aifoundry"
+ assert cleaned["actions_to_load"] == []
+ assert cleaned["enable_agent_gpt_apim"] is False
+ assert cleaned["other_settings"]["azure_ai_foundry"]["agent_id"] == "asst_123"
+ assert "new_foundry" not in cleaned["other_settings"]
+ assert validation_error is None, validation_error
+ print("✅ Classic Foundry payload validation passed.")
+
+
+def test_new_foundry_payload_validates_and_stays_separate():
+ """New Foundry payloads should sanitize and validate independently."""
+ print("🔍 Testing new Foundry payload validation...")
+
+ payload = {
+ "id": "123e4567-e89b-12d3-a456-426614174001",
+ "name": "new_foundry_agent",
+ "display_name": "New Foundry Agent",
+ "description": "New Foundry path",
+ "instructions": "Managed in Foundry",
+ "agent_type": "new_foundry",
+ "actions_to_load": ["pluginA", "pluginB"],
+ "enable_agent_gpt_apim": True,
+ "azure_openai_gpt_endpoint": "https://nadoyle-foundry.services.ai.azure.com",
+ "azure_openai_gpt_deployment": "sc-aifoundry",
+ "azure_openai_gpt_api_version": "2025-11-15-preview",
+ "other_settings": {
+ "new_foundry": {
+ "application_id": "new-foundry-agent-not-openai:3",
+ "application_name": "new-foundry-agent-not-openai",
+ "application_version": "3",
+ "endpoint_id": "new-foundry-endpoint",
+ "responses_api_version": "2025-11-15-preview",
+ "activity_api_version": "2025-11-15-preview",
+ "notes": "phase-1 validation"
+ }
+ },
+ "max_completion_tokens": 4096,
+ }
+
+ cleaned = sanitize_agent_payload(payload)
+ validation_error = validate_agent(cleaned)
+
+ assert cleaned["agent_type"] == "new_foundry"
+ assert cleaned["actions_to_load"] == []
+ assert cleaned["enable_agent_gpt_apim"] is False
+ assert cleaned["other_settings"]["new_foundry"]["application_id"] == "new-foundry-agent-not-openai:3"
+ assert cleaned["other_settings"]["new_foundry"]["responses_api_version"] == "2025-11-15-preview"
+ assert "azure_ai_foundry" not in cleaned["other_settings"]
+ assert validation_error is None, validation_error
+ print("✅ New Foundry payload validation passed.")
+
+
+def test_new_foundry_requires_application_reference():
+ """New Foundry payloads should fail without an application reference."""
+ print("🔍 Testing new Foundry application reference requirement...")
+
+ payload = {
+ "id": "123e4567-e89b-12d3-a456-426614174002",
+ "name": "missing_new_foundry_app",
+ "display_name": "Missing New Foundry App",
+ "description": "Invalid new Foundry payload",
+ "instructions": "Managed in Foundry",
+ "agent_type": "new_foundry",
+ "actions_to_load": [],
+ "azure_openai_gpt_endpoint": "https://nadoyle-foundry.services.ai.azure.com",
+ "azure_openai_gpt_deployment": "sc-aifoundry",
+ "azure_openai_gpt_api_version": "2025-11-15-preview",
+ "other_settings": {
+ "new_foundry": {
+ "responses_api_version": "2025-11-15-preview"
+ }
+ },
+ "max_completion_tokens": 4096,
+ }
+
+ try:
+ sanitize_agent_payload(payload)
+ except AgentPayloadError as exc:
+ assert "application" in str(exc).lower()
+ print("✅ Missing New Foundry application reference correctly rejected.")
+ return
+
+ raise AssertionError("Expected AgentPayloadError for missing New Foundry application reference")
+
+
+def test_dual_foundry_runtime_and_modal_hooks_exist():
+ """Runtime, loader, and modal files should include explicit dual Foundry support hooks."""
+ print("🔍 Verifying runtime and modal support hooks...")
+
+ runtime_content = read_file_text("application", "single_app", "foundry_agent_runtime.py")
+ loader_content = read_file_text("application", "single_app", "semantic_kernel_loader.py")
+ modal_js_content = read_file_text("application", "single_app", "static", "js", "agent_modal_stepper.js")
+ modal_html_content = read_file_text("application", "single_app", "templates", "_agent_modal.html")
+
+ required_snippets = [
+ (runtime_content, "class AzureAIFoundryNewChatCompletionAgent"),
+ (runtime_content, "execute_new_foundry_agent"),
+ (loader_content, 'if agent_type in {"aifoundry", "new_foundry"}:'),
+ (loader_content, '("aoai", "aifoundry", "new_foundry")'),
+ (modal_js_content, "selectedAgentType === 'new_foundry'"),
+ (modal_js_content, "getAgentTypeLabel"),
+ (modal_html_content, 'value="new_foundry"'),
+ (modal_html_content, "agent-new-foundry-application-id"),
+ ]
+
+ missing = [snippet for content, snippet in required_snippets if snippet not in content]
+ if missing:
+ raise AssertionError(f"Missing expected dual Foundry support hooks: {', '.join(missing)}")
+
+ print("✅ Runtime, loader, and modal hooks verified.")
+
+
+def run_tests():
+ tests = [
+ test_classic_foundry_payload_still_validates,
+ test_new_foundry_payload_validates_and_stays_separate,
+ test_new_foundry_requires_application_reference,
+ test_dual_foundry_runtime_and_modal_hooks_exist,
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_endpoints_tab_order_visibility.py b/functional_tests/test_endpoints_tab_order_visibility.py
new file mode 100644
index 00000000..7fdc0fdd
--- /dev/null
+++ b/functional_tests/test_endpoints_tab_order_visibility.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+# test_endpoints_tab_order_visibility.py
+"""
+Functional test for workspace/group endpoints tab order and visibility.
+Version: 0.236.046
+Implemented in: 0.236.046
+
+This test ensures endpoints tabs appear after actions and are gated by admin
+custom endpoint settings.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_workspace_endpoints_tab_order_visibility():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ template_path = os.path.join(repo_root, "application", "single_app", "templates", "workspace.html")
+ content = read_file_text(template_path)
+
+ agents_idx = content.find('id="agents-tab-btn"')
+ actions_idx = content.find('id="plugins-tab-btn"')
+ endpoints_idx = content.find('id="endpoints-tab-btn"')
+
+ assert agents_idx != -1, "Agents tab button missing in workspace template."
+ assert actions_idx != -1, "Actions tab button missing in workspace template."
+ assert endpoints_idx != -1, "Endpoints tab button missing in workspace template."
+ assert agents_idx < actions_idx < endpoints_idx, "Workspace tab order should be Agents -> Actions -> Endpoints."
+
+ actions_pane_idx = content.find('id="plugins-tab"')
+ endpoints_pane_idx = content.find('id="endpoints-tab"')
+ assert actions_pane_idx != -1, "Actions tab pane missing in workspace template."
+ assert endpoints_pane_idx != -1, "Endpoints tab pane missing in workspace template."
+ assert actions_pane_idx < endpoints_pane_idx, "Endpoints tab pane should appear after actions pane."
+
+ assert "settings.allow_user_custom_agent_endpoints" in content, "Workspace endpoints should be gated by custom endpoint settings."
+
+ print("✅ Workspace endpoints tab order and visibility verified.")
+
+
+def test_group_endpoints_tab_order_visibility():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ template_path = os.path.join(repo_root, "application", "single_app", "templates", "group_workspaces.html")
+ content = read_file_text(template_path)
+
+ agents_idx = content.find('id="group-agents-tab-btn"')
+ actions_idx = content.find('id="group-plugins-tab-btn"')
+ endpoints_idx = content.find('id="group-endpoints-tab-btn"')
+
+ assert agents_idx != -1, "Group agents tab button missing in group workspace template."
+ assert actions_idx != -1, "Group actions tab button missing in group workspace template."
+ assert endpoints_idx != -1, "Group endpoints tab button missing in group workspace template."
+ assert agents_idx < actions_idx < endpoints_idx, "Group tab order should be Agents -> Actions -> Endpoints."
+
+ actions_pane_idx = content.find('id="group-plugins-tab"')
+ endpoints_pane_idx = content.find('id="group-endpoints-tab"')
+ assert actions_pane_idx != -1, "Group actions tab pane missing in group workspace template."
+ assert endpoints_pane_idx != -1, "Group endpoints tab pane missing in group workspace template."
+ assert actions_pane_idx < endpoints_pane_idx, "Group endpoints tab pane should appear after actions pane."
+
+ assert "settings.allow_group_custom_agent_endpoints" in content, "Group endpoints should be gated by custom endpoint settings."
+
+ print("✅ Group endpoints tab order and visibility verified.")
+
+
+def run_tests():
+ tests = [
+ test_workspace_endpoints_tab_order_visibility,
+ test_group_endpoints_tab_order_visibility
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_foundry_agent_endpoint_resolution.py b/functional_tests/test_foundry_agent_endpoint_resolution.py
new file mode 100644
index 00000000..6dd613c6
--- /dev/null
+++ b/functional_tests/test_foundry_agent_endpoint_resolution.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# test_foundry_agent_endpoint_resolution.py
+"""
+Functional test for Foundry agent endpoint resolution enrichment.
+Version: 0.236.051
+Implemented in: 0.236.051
+
+This test ensures Foundry agent endpoint configuration is enriched with
+project_name and supports endpoint_id fallback when model_endpoint_id is missing.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_foundry_agent_endpoint_resolution_enrichment():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ loader_path = os.path.join(repo_root, "application", "single_app", "semantic_kernel_loader.py")
+
+ loader_content = read_file_text(loader_path)
+
+ assert "foundry_settings[\"project_name\"]" in loader_content, (
+ "Expected Foundry settings to include project_name enrichment."
+ )
+ assert "foundry_settings.get(\"endpoint_id\")" in loader_content, (
+ "Expected endpoint_id fallback to be available in Foundry resolution."
+ )
+
+ print("✅ Foundry agent endpoint resolution enrichment verified.")
+
+
+def run_tests():
+ tests = [test_foundry_agent_endpoint_resolution_enrichment]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_foundry_agent_list_async_paging.py b/functional_tests/test_foundry_agent_list_async_paging.py
new file mode 100644
index 00000000..649a240d
--- /dev/null
+++ b/functional_tests/test_foundry_agent_list_async_paging.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# test_foundry_agent_list_async_paging.py
+"""
+Functional test for Foundry agent list async paging handling.
+Version: 0.236.047
+Implemented in: 0.236.047
+
+This test ensures Foundry agent listing avoids awaiting AsyncItemPaged
+and iterates async results safely.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_foundry_agent_list_async_paging():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ runtime_path = os.path.join(repo_root, "application", "single_app", "foundry_agent_runtime.py")
+ content = read_file_text(runtime_path)
+
+ assert "async for item in result" in content, "Expected async iteration over agent list results."
+ assert "return agents_client.list_agents()" in content, "list_agents should not be awaited."
+
+ print("✅ Foundry agent list async paging handling verified.")
+
+
+def run_tests():
+ tests = [test_foundry_agent_list_async_paging]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_foundry_agent_list_project_endpoint.py b/functional_tests/test_foundry_agent_list_project_endpoint.py
new file mode 100644
index 00000000..c49e9fb3
--- /dev/null
+++ b/functional_tests/test_foundry_agent_list_project_endpoint.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# test_foundry_agent_list_project_endpoint.py
+"""
+Functional test for Foundry agent list project endpoint resolution.
+Version: 0.236.048
+Implemented in: 0.236.048
+
+This test ensures project names are used to append /api/projects/
+when listing Foundry agents.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_foundry_agent_list_project_endpoint_resolution():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ runtime_path = os.path.join(repo_root, "application", "single_app", "foundry_agent_runtime.py")
+ models_path = os.path.join(repo_root, "application", "single_app", "route_backend_models.py")
+
+ runtime_content = read_file_text(runtime_path)
+ models_content = read_file_text(models_path)
+
+ assert "project_name" in models_content, "Expected Foundry settings to include project_name."
+ assert "/api/projects/" in runtime_content, "Expected endpoint normalization to include /api/projects/."
+
+ print("✅ Foundry agent list project endpoint resolution verified.")
+
+
+def run_tests():
+ tests = [test_foundry_agent_list_project_endpoint_resolution]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_foundry_chat_scope_resolution.py b/functional_tests/test_foundry_chat_scope_resolution.py
new file mode 100644
index 00000000..a40c9199
--- /dev/null
+++ b/functional_tests/test_foundry_chat_scope_resolution.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+# test_foundry_chat_scope_resolution.py
+"""
+Functional test for Foundry chat scope resolution.
+Version: 0.236.030
+Implemented in: 0.236.030
+
+This test ensures multi-endpoint chat inference uses cloud-aware Foundry scopes.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_foundry_chat_scope_resolution():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ backend_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_chats.py')
+ content = read_file_text(backend_path)
+
+ assert "def resolve_foundry_scope_for_auth" in content, "Missing Foundry scope resolver for chat routes."
+ assert "https://ai.azure.com/.default" in content, "Expected public Foundry scope constant."
+ assert "https://ai.azure.us/.default" in content, "Expected government Foundry scope constant."
+ assert "foundry_scope" in content, "Expected custom Foundry scope override field."
+ assert "Multi-endpoint SP scope" in content, "Expected Foundry scope debug logging for service principal."
+
+ print("✅ Foundry chat scope resolution verified.")
+
+
+if __name__ == "__main__":
+ test_foundry_chat_scope_resolution()
diff --git a/functional_tests/test_foundry_deployment_disabled_filter.py b/functional_tests/test_foundry_deployment_disabled_filter.py
new file mode 100644
index 00000000..188b2965
--- /dev/null
+++ b/functional_tests/test_foundry_deployment_disabled_filter.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# test_foundry_deployment_disabled_filter.py
+"""
+Functional test for filtering disabled deployments in model discovery.
+Version: 0.236.025
+Implemented in: 0.236.025
+
+This test ensures deployment provisioning state filtering exists for AOAI/Foundry model lists.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_foundry_deployment_disabled_filter():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ backend_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_models.py')
+ content = read_file_text(backend_path)
+
+ assert 'def is_deployment_enabled' in content, "Missing deployment state filter helper."
+ assert 'provisioningState' in content or 'provisioning_state' in content, "Expected provisioning state extraction."
+
+ print("✅ Disabled deployments filtered from model lists.")
+
+
+if __name__ == "__main__":
+ test_foundry_deployment_disabled_filter()
diff --git a/functional_tests/test_foundry_endpoint_resolution.py b/functional_tests/test_foundry_endpoint_resolution.py
new file mode 100644
index 00000000..e17e5194
--- /dev/null
+++ b/functional_tests/test_foundry_endpoint_resolution.py
@@ -0,0 +1,73 @@
+# test_foundry_endpoint_resolution.py
+"""
+Functional test for Foundry endpoint resolution.
+Version: 0.236.060
+Implemented in: 0.236.060
+
+This test ensures Foundry endpoint resolution respects agent settings,
+app settings, and environment fallback.
+"""
+
+import os
+import sys
+
+repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.append(repo_root)
+
+from application.single_app.semantic_kernel_loader import resolve_foundry_endpoint_from_settings
+
+
+def test_foundry_endpoint_resolution_priority():
+ """Agent settings should override global settings and env."""
+ print("🔍 Validating Foundry endpoint resolution priority...")
+
+ settings = {"azure_ai_foundry_endpoint": "https://global.example"}
+ foundry_settings = {"endpoint": "https://agent.example"}
+
+ resolved = resolve_foundry_endpoint_from_settings(foundry_settings, settings)
+ assert resolved == "https://agent.example"
+
+ print("✅ Foundry endpoint resolution priority passed.")
+
+
+def test_foundry_endpoint_resolution_fallbacks():
+ """Global settings should be used when agent endpoint is missing."""
+ print("🔍 Validating Foundry endpoint resolution fallback...")
+
+ settings = {"azure_ai_foundry_endpoint": "https://global.example"}
+ foundry_settings = {}
+
+ resolved = resolve_foundry_endpoint_from_settings(foundry_settings, settings)
+ assert resolved == "https://global.example"
+
+ print("✅ Foundry endpoint resolution fallback passed.")
+
+
+def run_tests():
+ tests = [
+ test_foundry_endpoint_resolution_priority,
+ test_foundry_endpoint_resolution_fallbacks,
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ success = run_tests()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_foundry_inference_scope_fix.py b/functional_tests/test_foundry_inference_scope_fix.py
new file mode 100644
index 00000000..644271bb
--- /dev/null
+++ b/functional_tests/test_foundry_inference_scope_fix.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+# test_foundry_inference_scope_fix.py
+"""
+Functional test for Foundry scope selection by cloud.
+Version: 0.236.028
+Implemented in: 0.236.028
+
+This test ensures Foundry model inference uses cloud-specific scopes and supports custom scope overrides.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_foundry_inference_scope_fix():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ backend_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_models.py')
+ content = read_file_text(backend_path)
+
+ assert "def resolve_foundry_scope" in content, "Missing Foundry scope resolver."
+ assert "https://ai.azure.com/.default" in content, "Expected public Foundry scope constant."
+ assert "https://ai.azure.us/.default" in content, "Expected government Foundry scope constant."
+ assert "foundry_scope" in content, "Expected custom Foundry scope override field."
+
+ print("✅ Foundry scope selection verified.")
+
+
+if __name__ == "__main__":
+ test_foundry_inference_scope_fix()
diff --git a/functional_tests/test_foundry_management_fields_cleanup.py b/functional_tests/test_foundry_management_fields_cleanup.py
new file mode 100644
index 00000000..d8d460af
--- /dev/null
+++ b/functional_tests/test_foundry_management_fields_cleanup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# test_foundry_management_fields_cleanup.py
+"""
+Functional test for Foundry management field cleanup.
+Version: 0.236.029
+Implemented in: 0.236.029
+
+This test ensures Foundry endpoints no longer use subscription/resource/location fields in the modal payload.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_foundry_management_fields_cleanup():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+ content = read_file_text(js_path)
+
+ assert "const management = provider === \"aoai\"" in content, "Expected management fields only for AOAI."
+ assert "endpointLocation" not in content, "Foundry/AOAI location field should be removed from the modal script."
+
+ print("✅ Foundry management field cleanup verified.")
+
+
+if __name__ == "__main__":
+ test_foundry_management_fields_cleanup()
diff --git a/functional_tests/test_foundry_model_fetch_sync_credentials.py b/functional_tests/test_foundry_model_fetch_sync_credentials.py
new file mode 100644
index 00000000..230ca172
--- /dev/null
+++ b/functional_tests/test_foundry_model_fetch_sync_credentials.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# test_foundry_model_fetch_sync_credentials.py
+"""
+Functional test for Foundry model fetch sync credential handling.
+Version: 0.239.156
+Implemented in: 0.239.156
+
+This test ensures sync Foundry model discovery helpers use synchronous Azure
+credentials so token retrieval returns a token object instead of a coroutine.
+"""
+
+import importlib
+import inspect
+import os
+import sys
+import types
+
+
+REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+SINGLE_APP_ROOT = os.path.join(REPO_ROOT, "application", "single_app")
+sys.path.insert(0, SINGLE_APP_ROOT)
+sys.path.insert(0, REPO_ROOT)
+
+
+def restore_modules(original_modules):
+ for module_name, original_module in original_modules.items():
+ if original_module is None:
+ sys.modules.pop(module_name, None)
+ else:
+ sys.modules[module_name] = original_module
+
+
+def load_foundry_agent_runtime_module():
+ functions_appinsights_stub = types.ModuleType("functions_appinsights")
+ functions_appinsights_stub.log_event = lambda *args, **kwargs: None
+
+ functions_debug_stub = types.ModuleType("functions_debug")
+ functions_debug_stub.debug_print = lambda *args, **kwargs: None
+
+ functions_keyvault_stub = types.ModuleType("functions_keyvault")
+ functions_keyvault_stub.retrieve_secret_from_key_vault_by_full_name = lambda value: value
+ functions_keyvault_stub.validate_secret_name_dynamic = lambda value: False
+
+ requests_stub = types.ModuleType("requests")
+
+ class Response:
+ pass
+
+ requests_stub.Response = Response
+ requests_stub.get = lambda *args, **kwargs: None
+ requests_stub.post = lambda *args, **kwargs: None
+
+ azure_stub = types.ModuleType("azure")
+ azure_identity_stub = types.ModuleType("azure.identity")
+ azure_identity_aio_stub = types.ModuleType("azure.identity.aio")
+
+ class SyncToken:
+ def __init__(self, value):
+ self.token = value
+
+ class SyncDefaultAzureCredential:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ def get_token(self, scope):
+ return SyncToken(f"sync:{scope}")
+
+ def close(self):
+ return None
+
+ class SyncClientSecretCredential(SyncDefaultAzureCredential):
+ pass
+
+ class AsyncDefaultAzureCredential:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ async def get_token(self, scope):
+ return SyncToken(f"async:{scope}")
+
+ async def close(self):
+ return None
+
+ class AsyncClientSecretCredential(AsyncDefaultAzureCredential):
+ pass
+
+ class AzureAuthorityHosts:
+ AZURE_PUBLIC_CLOUD = "public"
+ AZURE_GOVERNMENT = "government"
+
+ azure_identity_stub.AzureAuthorityHosts = AzureAuthorityHosts
+ azure_identity_stub.ClientSecretCredential = SyncClientSecretCredential
+ azure_identity_stub.DefaultAzureCredential = SyncDefaultAzureCredential
+ azure_identity_aio_stub.ClientSecretCredential = AsyncClientSecretCredential
+ azure_identity_aio_stub.DefaultAzureCredential = AsyncDefaultAzureCredential
+
+ semantic_kernel_stub = types.ModuleType("semantic_kernel")
+ semantic_kernel_agents_stub = types.ModuleType("semantic_kernel.agents")
+ semantic_kernel_contents_stub = types.ModuleType("semantic_kernel.contents")
+ semantic_kernel_chat_stub = types.ModuleType("semantic_kernel.contents.chat_message_content")
+
+ class AzureAIAgent:
+ @staticmethod
+ def create_client(*args, **kwargs):
+ raise AssertionError("create_client should not be used in this regression test")
+
+ class ChatMessageContent:
+ pass
+
+ semantic_kernel_agents_stub.AzureAIAgent = AzureAIAgent
+ semantic_kernel_chat_stub.ChatMessageContent = ChatMessageContent
+
+ original_modules = {}
+ module_stubs = {
+ "functions_appinsights": functions_appinsights_stub,
+ "functions_debug": functions_debug_stub,
+ "functions_keyvault": functions_keyvault_stub,
+ "requests": requests_stub,
+ "azure": azure_stub,
+ "azure.identity": azure_identity_stub,
+ "azure.identity.aio": azure_identity_aio_stub,
+ "semantic_kernel": semantic_kernel_stub,
+ "semantic_kernel.agents": semantic_kernel_agents_stub,
+ "semantic_kernel.contents": semantic_kernel_contents_stub,
+ "semantic_kernel.contents.chat_message_content": semantic_kernel_chat_stub,
+ }
+
+ for module_name, module_stub in module_stubs.items():
+ original_modules[module_name] = sys.modules.get(module_name)
+ sys.modules[module_name] = module_stub
+
+ original_modules["foundry_agent_runtime"] = sys.modules.get("foundry_agent_runtime")
+ sys.modules.pop("foundry_agent_runtime", None)
+ module = importlib.import_module("foundry_agent_runtime")
+ return module, original_modules
+
+
+def test_build_project_credential_returns_sync_token_provider():
+ """Ensure sync discovery helper uses sync Azure credentials."""
+ print("🔍 Testing Foundry sync discovery credential builder...")
+ module, original_modules = load_foundry_agent_runtime_module()
+
+ try:
+ credential = module.build_project_credential({"type": "managed_identity"})
+ token = credential.get_token("https://ai.azure.com/.default")
+
+ assert not inspect.iscoroutine(token)
+ assert token.token == "sync:https://ai.azure.com/.default"
+ finally:
+ restore_modules(original_modules)
+
+ print("✅ Foundry sync discovery credential builder passed.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_build_project_credential_returns_sync_token_provider()
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_foundry_model_list_fallback.py b/functional_tests/test_foundry_model_list_fallback.py
new file mode 100644
index 00000000..30159a2a
--- /dev/null
+++ b/functional_tests/test_foundry_model_list_fallback.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# test_foundry_model_list_fallback.py
+"""
+Functional test for Foundry project deployments discovery.
+Version: 0.236.026
+Implemented in: 0.236.026
+
+This test ensures Foundry model discovery uses the project deployments list endpoint.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_foundry_model_list_fallback():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ backend_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_models.py')
+ content = read_file_text(backend_path)
+
+ assert "fetch_foundry_project_deployments" in content, "Expected project deployments discovery helper."
+ assert "/deployments" in content, "Expected Foundry project deployments list endpoint."
+ assert "https://ai.azure.com/.default" in content, "Expected Foundry project scope for discovery token."
+
+ print("✅ Foundry project deployments discovery verified.")
+
+
+if __name__ == "__main__":
+ test_foundry_model_list_fallback()
diff --git a/functional_tests/test_group_agent_conversation_metadata_fix.py b/functional_tests/test_group_agent_conversation_metadata_fix.py
index aa45f493..f651c3fd 100644
--- a/functional_tests/test_group_agent_conversation_metadata_fix.py
+++ b/functional_tests/test_group_agent_conversation_metadata_fix.py
@@ -2,8 +2,8 @@
# test_group_agent_conversation_metadata_fix.py
"""
Functional test for group agent conversation metadata fix.
-Version: 0.233.161
-Implemented in: 0.233.161
+Version: 0.239.166
+Implemented in: 0.239.166
This test ensures that conversations using group agents receive the correct
primary context and chat type metadata even when no documents are retrieved.
@@ -11,9 +11,15 @@
import sys
import os
+from pathlib import Path
from unittest.mock import patch
-sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+ROOT = Path(__file__).resolve().parents[1]
+APP_DIR = ROOT / "application" / "single_app"
+
+if str(APP_DIR) not in sys.path:
+ sys.path.insert(0, str(APP_DIR))
def test_group_agent_primary_context():
diff --git a/functional_tests/test_group_agent_selection_scope.py b/functional_tests/test_group_agent_selection_scope.py
new file mode 100644
index 00000000..a2f37070
--- /dev/null
+++ b/functional_tests/test_group_agent_selection_scope.py
@@ -0,0 +1,95 @@
+# test_group_agent_selection_scope.py
+"""
+Functional test for scope-aware agent selection.
+Version: 0.236.059
+Implemented in: 0.236.059
+
+This test ensures scope-aware agent matching respects group IDs and global/personal scopes.
+"""
+
+import os
+import sys
+
+repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.append(repo_root)
+
+from application.single_app.semantic_kernel_loader import find_agent_by_scope
+
+
+def test_group_agent_selection_requires_group_id_match():
+ """Ensure group agent selection only matches when group_id aligns."""
+ print("🔍 Validating group agent scope matching...")
+
+ agents_cfg = [
+ {"id": "g1", "name": "group-web-search", "is_group": True, "group_id": "group-a"},
+ {"id": "p1", "name": "group-web-search", "is_group": False, "is_global": False}
+ ]
+
+ selected_agent = {
+ "id": "g1",
+ "name": "group-web-search",
+ "is_group": True,
+ "group_id": "group-b"
+ }
+
+ resolved = find_agent_by_scope(agents_cfg, selected_agent)
+ assert resolved is None
+
+ selected_agent["group_id"] = "group-a"
+ resolved = find_agent_by_scope(agents_cfg, selected_agent)
+ assert resolved is not None
+ assert resolved.get("id") == "g1"
+
+ print("✅ Group agent scope matching passed.")
+
+
+def test_global_vs_personal_agent_selection():
+ """Ensure global and personal agents with same name are disambiguated by scope."""
+ print("🔍 Validating global vs personal agent selection...")
+
+ agents_cfg = [
+ {"id": "g1", "name": "researcher", "is_global": True, "is_group": False},
+ {"id": "p1", "name": "researcher", "is_global": False, "is_group": False}
+ ]
+
+ selected_global = {"name": "researcher", "is_global": True, "is_group": False}
+ resolved_global = find_agent_by_scope(agents_cfg, selected_global)
+ assert resolved_global is not None
+ assert resolved_global.get("id") == "g1"
+
+ selected_personal = {"name": "researcher", "is_global": False, "is_group": False}
+ resolved_personal = find_agent_by_scope(agents_cfg, selected_personal)
+ assert resolved_personal is not None
+ assert resolved_personal.get("id") == "p1"
+
+ print("✅ Global vs personal selection passed.")
+
+
+def run_tests():
+ tests = [
+ test_group_agent_selection_requires_group_id_match,
+ test_global_vs_personal_agent_selection
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ success = run_tests()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_group_member_deleted_activity_logging.py b/functional_tests/test_group_member_deleted_activity_logging.py
index a503d25e..37530c8f 100644
--- a/functional_tests/test_group_member_deleted_activity_logging.py
+++ b/functional_tests/test_group_member_deleted_activity_logging.py
@@ -232,7 +232,7 @@ def test_group_member_deleted_logging():
item=log['id'],
partition_key=log['user_id']
)
- except:
+            except Exception:
pass
print("✓ Activity logs cleaned up")
diff --git a/functional_tests/test_image_generation_model_compatibility.py b/functional_tests/test_image_generation_model_compatibility.py
index 07aa0bea..98bbee32 100644
--- a/functional_tests/test_image_generation_model_compatibility.py
+++ b/functional_tests/test_image_generation_model_compatibility.py
@@ -145,7 +145,7 @@ def test_image_response_validation():
else:
generated_image_url = image_data['url']
is_valid = generated_image_url and generated_image_url != 'null'
- except:
+            except Exception:
is_valid = False
if is_valid == expected:
diff --git a/functional_tests/test_logged_core_plugins.py b/functional_tests/test_logged_core_plugins.py
new file mode 100644
index 00000000..955f294d
--- /dev/null
+++ b/functional_tests/test_logged_core_plugins.py
@@ -0,0 +1,177 @@
+# test_logged_core_plugins.py
+"""
+Functional test for logged core Semantic Kernel plugins.
+Version: 0.239.153
+Implemented in: 0.239.153
+
+This test ensures that the SimpleChat subclasses for Semantic Kernel core plugins
+emit plugin invocation logs for inherited and custom methods and that invocation
+callbacks can be converted into thought records for both success and failure cases.
+"""
+
+import asyncio
+import sys
+import types
+
+
+def create_invocation(plugin_name, function_name, parameters, result=None, success=True, error_message=None, duration_ms=1234):
+ """Create a lightweight invocation object for formatter tests."""
+ return types.SimpleNamespace(
+ plugin_name=plugin_name,
+ function_name=function_name,
+ parameters=parameters,
+ result=result,
+ success=success,
+ error_message=error_message,
+ duration_ms=duration_ms,
+ )
+
+
+def install_test_stubs():
+ """Install lightweight stubs for application services used by the logger."""
+ functions_appinsights = types.ModuleType('functions_appinsights')
+ functions_appinsights.log_event = lambda *args, **kwargs: None
+ functions_appinsights.get_appinsights_logger = lambda: None
+ sys.modules['functions_appinsights'] = functions_appinsights
+
+ functions_authentication = types.ModuleType('functions_authentication')
+ functions_authentication.get_current_user_id = lambda: None
+ sys.modules['functions_authentication'] = functions_authentication
+
+ functions_debug = types.ModuleType('functions_debug')
+ functions_debug.debug_print = lambda *args, **kwargs: None
+ sys.modules['functions_debug'] = functions_debug
+
+
+class FakeThoughtTracker:
+ """Capture thought writes in memory for assertions."""
+
+ def __init__(self):
+ self.events = []
+
+ def add_thought(self, step_type, content, detail=None):
+ self.events.append({
+ 'step_type': step_type,
+ 'content': content,
+ 'detail': detail,
+ })
+
+
+def test_logged_core_plugins():
+ """Validate logged core plugin subclasses and thought callback mapping."""
+    from pathlib import Path; repo_root = str(Path(__file__).resolve().parents[1] / 'application' / 'single_app')
+ semantic_kernel_repo_root = r'c:\Repos\semantic-kernel\python'
+ if repo_root not in sys.path:
+ sys.path.insert(0, repo_root)
+ if semantic_kernel_repo_root not in sys.path:
+ sys.path.insert(0, semantic_kernel_repo_root)
+
+ install_test_stubs()
+
+ from semantic_kernel_plugins.math_plugin import MathPlugin
+ from semantic_kernel_plugins.plugin_invocation_logger import get_plugin_logger
+ from semantic_kernel_plugins.plugin_invocation_thoughts import (
+ format_plugin_invocation_start_thought,
+ format_plugin_invocation_thought,
+ register_plugin_invocation_thought_callback,
+ )
+ from semantic_kernel_plugins.text_plugin import TextPlugin
+ from semantic_kernel_plugins.time_plugin import TimePlugin
+ from semantic_kernel_plugins.wait_plugin import WaitPlugin
+
+ plugin_logger = get_plugin_logger()
+ plugin_logger.clear_history()
+ plugin_logger.deregister_callbacks('None:None')
+
+ thought_tracker = FakeThoughtTracker()
+ callback_key = register_plugin_invocation_thought_callback(
+ plugin_logger,
+ thought_tracker,
+ None,
+ None,
+ actor_label='Kernel'
+ )
+
+ math_plugin = MathPlugin()
+ text_plugin = TextPlugin()
+ time_plugin = TimePlugin()
+ wait_plugin = WaitPlugin()
+
+ print('Testing logged core plugin invocations...')
+ assert math_plugin.add('2', '3') == 5.0
+ assert math_plugin.multiply(4, 5) == 20.0
+ assert text_plugin.trim(' hello ') == 'hello'
+ assert isinstance(time_plugin.date(), str)
+ asyncio.run(wait_plugin.wait('0'))
+
+ try:
+ math_plugin.divide('1', '0')
+ except ValueError:
+ print('Captured expected divide-by-zero failure')
+ else:
+ raise AssertionError('Expected divide-by-zero failure was not raised')
+
+ plugin_logger.deregister_callbacks(callback_key)
+
+ recent_invocations = plugin_logger.get_recent_invocations(10)
+ recent_names = [f"{inv.plugin_name}.{inv.function_name}" for inv in recent_invocations]
+ print(f'Recent invocations: {recent_names}')
+
+ assert 'MathPlugin.Add' in recent_names
+ assert 'MathPlugin.Multiply' in recent_names
+ assert 'TextPlugin.trim' in recent_names
+ assert 'TimePlugin.date' in recent_names
+ assert 'WaitPlugin.wait' in recent_names
+ assert 'MathPlugin.Divide' in recent_names
+
+ divide_failure = next(inv for inv in recent_invocations if inv.plugin_name == 'MathPlugin' and inv.function_name == 'Divide')
+ assert divide_failure.success is False
+ assert divide_failure.error_message is not None
+ assert 'divide by zero' in divide_failure.error_message.lower()
+
+ thought_contents = [event['content'] for event in thought_tracker.events]
+ print(f'Thought events: {thought_contents}')
+
+ assert any('Invoking MathPlugin.Add' in content for content in thought_contents)
+ assert any('Invoking WaitPlugin.wait' in content for content in thought_contents)
+ assert any('Kernel performed math: 2 + 3 = 5.0' in content for content in thought_contents)
+ assert any('Kernel invoked wait for 0 seconds' in content for content in thought_contents)
+ assert any('Kernel performed math: 1 / 0' in content for content in thought_contents)
+ assert any(
+ event['detail'] and 'error=' in event['detail']
+ for event in thought_tracker.events
+ if 'Kernel performed math: 1 / 0' in event['content']
+ )
+
+ generic_thought = format_plugin_invocation_thought(
+ create_invocation(
+ 'OpenApiPlugin',
+ 'topNews',
+ {'query': 'latest space weather', 'limit': 5},
+ result='ok',
+ duration_ms=88,
+ ),
+ actor_label='Agent'
+ )
+ print(f"Generic thought payload: {generic_thought}")
+ assert 'Agent executed OpenApiPlugin.topNews' in generic_thought['content']
+ assert 'query=latest space weather' in generic_thought['content'] or 'query=latest space weat...' in generic_thought['content']
+
+ start_thought = format_plugin_invocation_start_thought(
+ create_invocation(
+ 'WaitPlugin',
+ 'wait',
+ {'input': 30},
+ )
+ )
+ print(f"Start thought payload: {start_thought}")
+ assert start_thought['content'] == 'Invoking WaitPlugin.wait'
+ assert 'input=30' in start_thought['detail']
+
+ print('All logged core plugin checks passed')
+ return True
+
+
+if __name__ == '__main__':
+ success = test_logged_core_plugins()
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_metadata_detection_utility.py b/functional_tests/test_metadata_detection_utility.py
index 35bb1fb8..fcaa824a 100644
--- a/functional_tests/test_metadata_detection_utility.py
+++ b/functional_tests/test_metadata_detection_utility.py
@@ -7,7 +7,7 @@ def test_metadata_detection(metadata_value):
try:
metadata_obj = json.loads(metadata or '{}')
has_metadata = len(metadata_obj) > 0
- except:
+    except Exception:
has_metadata = len(metadata) > 0 and metadata != '{}'
return has_metadata
diff --git a/functional_tests/test_model_endpoint_normalization_backend.py b/functional_tests/test_model_endpoint_normalization_backend.py
new file mode 100644
index 00000000..acc64aa3
--- /dev/null
+++ b/functional_tests/test_model_endpoint_normalization_backend.py
@@ -0,0 +1,116 @@
+# test_model_endpoint_normalization_backend.py
+"""
+Functional test for backend model endpoint normalization.
+Version: 0.239.155
+Implemented in: 0.239.155
+
+This test ensures model endpoints are normalized with stable IDs and enabled
+flags so frontend consumers receive consistent identifiers.
+"""
+
+import os
+import sys
+import importlib
+import json
+import types
+
+repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+single_app_root = os.path.join(repo_root, "application", "single_app")
+sys.path.append(repo_root)
+sys.path.append(single_app_root)
+
+
+def _restore_modules(original_modules):
+ for module_name, original_module in original_modules.items():
+ if original_module is None:
+ sys.modules.pop(module_name, None)
+ else:
+ sys.modules[module_name] = original_module
+
+
+def _load_functions_settings_module():
+ config_stub = types.ModuleType("config")
+ config_stub.json = json
+
+ appinsights_stub = types.ModuleType("functions_appinsights")
+ appinsights_stub.log_event = lambda *args, **kwargs: None
+
+ cache_stub = types.ModuleType("app_settings_cache")
+ cache_stub.get_settings_cache = lambda: None
+ cache_stub.update_settings_cache = lambda settings: None
+
+ original_modules = {}
+ for module_name, module_stub in {
+ "config": config_stub,
+ "functions_appinsights": appinsights_stub,
+ "app_settings_cache": cache_stub,
+ }.items():
+ original_modules[module_name] = sys.modules.get(module_name)
+ sys.modules[module_name] = module_stub
+
+ original_modules["application.single_app.functions_settings"] = sys.modules.get("application.single_app.functions_settings")
+ sys.modules.pop("application.single_app.functions_settings", None)
+ module = importlib.import_module("application.single_app.functions_settings")
+ return module, original_modules
+
+
+def test_model_endpoint_normalization_backend():
+ """Ensure IDs and enabled flags are normalized on backend."""
+ print("🔍 Validating backend model endpoint normalization...")
+ functions_settings, original_modules = _load_functions_settings_module()
+
+ endpoints = [
+ {
+ "name": "Foundry Endpoint",
+ "has_api_key": True,
+ "has_client_secret": True,
+ "connection": {"endpoint": "https://foundry.example"},
+ "models": [
+ {
+ "deploymentName": "gpt-4o"
+ }
+ ]
+ }
+ ]
+
+ try:
+ normalized, changed = functions_settings.normalize_model_endpoints(endpoints)
+
+ assert changed is True
+ assert normalized[0]["id"] == "Foundry Endpoint"
+ assert normalized[0]["enabled"] is True
+ assert "has_api_key" not in normalized[0]
+ assert "has_client_secret" not in normalized[0]
+ assert normalized[0]["models"][0]["id"] == "gpt-4o"
+ assert normalized[0]["models"][0]["enabled"] is True
+ finally:
+ _restore_modules(original_modules)
+
+ print("✅ Backend model endpoint normalization passed.")
+
+
+def run_tests():
+ tests = [test_model_endpoint_normalization_backend]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ success = run_tests()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_model_endpoint_payload_auth_type_order.py b/functional_tests/test_model_endpoint_payload_auth_type_order.py
new file mode 100644
index 00000000..3378ba9f
--- /dev/null
+++ b/functional_tests/test_model_endpoint_payload_auth_type_order.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# test_model_endpoint_payload_auth_type_order.py
+"""
+Functional test for model endpoint payload auth type ordering.
+Version: 0.236.020
+Implemented in: 0.236.020
+
+This test ensures authType is defined before validation checks in buildEndpointPayload,
+so per-model test and fetch actions do not throw a reference error.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_model_endpoint_payload_auth_type_order():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+
+ content = read_file_text(js_path)
+ auth_type_index = content.find("const authType = endpointAuthTypeSelect")
+ foundry_check_index = content.find("provider === \"aifoundry\" && authType")
+ aoai_check_index = content.find("provider === \"aoai\" && authType")
+
+ assert auth_type_index != -1, "Expected authType assignment in buildEndpointPayload."
+ assert foundry_check_index != -1, "Expected Foundry validation using authType."
+ assert aoai_check_index != -1, "Expected AOAI validation using authType."
+ assert auth_type_index < foundry_check_index, "authType must be defined before Foundry validation."
+ assert auth_type_index < aoai_check_index, "authType must be defined before AOAI validation."
+
+ print("✅ buildEndpointPayload defines authType before validation checks.")
+
+
+if __name__ == "__main__":
+ test_model_endpoint_payload_auth_type_order()
diff --git a/functional_tests/test_model_endpoints_aoai_fetch_fix.py b/functional_tests/test_model_endpoints_aoai_fetch_fix.py
new file mode 100644
index 00000000..1d6199be
--- /dev/null
+++ b/functional_tests/test_model_endpoints_aoai_fetch_fix.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# test_model_endpoints_aoai_fetch_fix.py
+"""
+Functional test for Azure OpenAI model discovery in multi-endpoint modal.
+Version: 0.236.015
+Implemented in: 0.236.015
+
+This test ensures AOAI model discovery wiring includes resource group input in the modal,
+payload support in admin JS, and backend handling for AOAI in the models fetch route.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_model_endpoints_aoai_fetch_fix():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+
+ template_path = os.path.join(repo_root, 'application', 'single_app', 'templates', 'admin_settings.html')
+ admin_js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+ models_route_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_models.py')
+
+ template_content = read_file_text(template_path)
+ admin_js_content = read_file_text(admin_js_path)
+ route_content = read_file_text(models_route_path)
+
+ assert 'id="model-endpoint-resource-group"' in template_content, "Resource group input missing from endpoint modal."
+ assert 'resource_group' in admin_js_content, "Admin endpoint payload missing resource_group handling."
+ assert 'provider == "aoai"' in route_content, "AOAI handling missing in model fetch route."
+ assert 'resource_group' in route_content, "Backend AOAI discovery missing resource group requirement."
+
+ print("✅ AOAI model discovery wiring validated.")
+
+
+if __name__ == "__main__":
+ test_model_endpoints_aoai_fetch_fix()
diff --git a/functional_tests/test_model_endpoints_api_key_manual_models.py b/functional_tests/test_model_endpoints_api_key_manual_models.py
new file mode 100644
index 00000000..ec210504
--- /dev/null
+++ b/functional_tests/test_model_endpoints_api_key_manual_models.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# test_model_endpoints_api_key_manual_models.py
+"""
+Functional test for API key manual model entry in endpoint modal.
+Version: 0.239.155
+Implemented in: 0.239.155
+
+This test ensures the API key flow exposes manual model entry UI,
+per-model test buttons, and management cloud fields for service principal.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_model_endpoints_api_key_manual_models():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+
+ template_path = os.path.join(repo_root, 'application', 'single_app', 'templates', '_multiendpoint_modal.html')
+ js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+ backend_path = os.path.join(repo_root, 'application', 'single_app', 'route_backend_models.py')
+
+ template_content = read_file_text(template_path)
+ js_content = read_file_text(js_path)
+ backend_content = read_file_text(backend_path)
+
+ assert 'id="model-endpoint-api-key-note"' in template_content, "Missing API key inference-only note."
+ assert 'id="model-endpoint-add-model-btn"' in template_content, "Missing Add Model button for API key flow."
+ assert 'id="model-endpoint-management-cloud"' in template_content, "Missing management cloud selector."
+ assert 'id="model-endpoint-custom-authority"' in template_content, "Missing custom authority input."
+
+ assert 'addManualModel' in js_content, "Missing manual model add handler."
+ assert 'test-model' in js_content, "Missing per-model test action wiring."
+ assert 'management_cloud' in js_content, "Missing management cloud payload wiring."
+ assert 'const endpointId = endpointIdInput?.value.trim() || "";' in js_content, "Missing endpoint ID request wiring."
+
+ assert '/api/models/test-model' in backend_content, "Missing backend test-model endpoint."
+ assert 'resolve_request_endpoint_payload' in backend_content, "Missing stored-secret request resolution helper."
+
+ print("✅ API key manual model entry and per-model test wiring verified.")
+
+
+if __name__ == "__main__":
+ test_model_endpoints_api_key_manual_models()
diff --git a/functional_tests/test_model_endpoints_key_vault_secret_storage.py b/functional_tests/test_model_endpoints_key_vault_secret_storage.py
new file mode 100644
index 00000000..de14e629
--- /dev/null
+++ b/functional_tests/test_model_endpoints_key_vault_secret_storage.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python3
+# test_model_endpoints_key_vault_secret_storage.py
+"""
+Functional test for MultiGPT endpoint Key Vault secret storage.
+Version: 0.239.155
+Implemented in: 0.239.155
+
+This test ensures MultiGPT endpoint secrets are stored in Key Vault,
+returned to the UI as placeholders, resolved for backend use, and cleaned up
+when endpoint auth settings change.
+"""
+
+import importlib
+import os
+import sys
+import types
+
+
+REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+SINGLE_APP_ROOT = os.path.join(REPO_ROOT, "application", "single_app")
+sys.path.insert(0, SINGLE_APP_ROOT)
+sys.path.insert(0, REPO_ROOT)
+
+
+class FakeRetrievedSecret:
+ def __init__(self, value):
+ self.value = value
+
+
+class FakeSecretClient:
+ stored_secrets = {}
+ deleted_secrets = []
+
+ def __init__(self, vault_url, credential):
+ self.vault_url = vault_url
+ self.credential = credential
+
+ @classmethod
+ def reset(cls):
+ cls.stored_secrets = {}
+ cls.deleted_secrets = []
+
+ def set_secret(self, name, value):
+ FakeSecretClient.stored_secrets[name] = value
+
+ def get_secret(self, name):
+ return FakeRetrievedSecret(FakeSecretClient.stored_secrets[name])
+
+ def begin_delete_secret(self, name):
+ FakeSecretClient.deleted_secrets.append(name)
+ FakeSecretClient.stored_secrets.pop(name, None)
+
+
+def restore_modules(original_modules):
+ for module_name, original_module in original_modules.items():
+ if original_module is None:
+ sys.modules.pop(module_name, None)
+ else:
+ sys.modules[module_name] = original_module
+
+
+def load_functions_keyvault_module():
+ config_stub = types.ModuleType("config")
+ config_stub.KEY_VAULT_DOMAIN = ".vault.azure.net"
+
+ appinsights_stub = types.ModuleType("functions_appinsights")
+ appinsights_stub.log_event = lambda *args, **kwargs: None
+
+ auth_stub = types.ModuleType("functions_authentication")
+ settings_stub = types.ModuleType("functions_settings")
+
+ app_settings_cache_stub = types.ModuleType("app_settings_cache")
+ app_settings_cache_stub.get_settings_cache = lambda: {
+ "enable_key_vault_secret_storage": True,
+ "key_vault_name": "unit-test-vault",
+ "key_vault_identity": None,
+ }
+
+ azure_stub = types.ModuleType("azure")
+ identity_stub = types.ModuleType("azure.identity")
+ keyvault_stub = types.ModuleType("azure.keyvault")
+ secrets_stub = types.ModuleType("azure.keyvault.secrets")
+
+ class FakeDefaultAzureCredential:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ identity_stub.DefaultAzureCredential = FakeDefaultAzureCredential
+ secrets_stub.SecretClient = FakeSecretClient
+ azure_stub.identity = identity_stub
+ azure_stub.keyvault = keyvault_stub
+ keyvault_stub.secrets = secrets_stub
+
+ original_modules = {}
+ module_stubs = {
+ "config": config_stub,
+ "functions_appinsights": appinsights_stub,
+ "functions_authentication": auth_stub,
+ "functions_settings": settings_stub,
+ "app_settings_cache": app_settings_cache_stub,
+ "azure": azure_stub,
+ "azure.identity": identity_stub,
+ "azure.keyvault": keyvault_stub,
+ "azure.keyvault.secrets": secrets_stub,
+ }
+
+ for module_name, module_stub in module_stubs.items():
+ original_modules[module_name] = sys.modules.get(module_name)
+ sys.modules[module_name] = module_stub
+
+ original_modules["functions_keyvault"] = sys.modules.get("functions_keyvault")
+ sys.modules.pop("functions_keyvault", None)
+
+ module = importlib.import_module("functions_keyvault")
+ return module, original_modules
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file_handle:
+ return file_handle.read()
+
+
+def test_model_endpoint_key_vault_helper_lifecycle():
+ """Ensure endpoint auth secrets are stored, resolved, and cleaned up correctly."""
+ print("🔍 Testing model endpoint Key Vault helper lifecycle...")
+ FakeSecretClient.reset()
+ module, original_modules = load_functions_keyvault_module()
+
+ try:
+ endpoint = {
+ "id": "endpoint-123",
+ "name": "Primary Endpoint",
+ "auth": {
+ "type": "api_key",
+ "api_key": "super-secret-key",
+ },
+ }
+
+ saved_endpoint = module.keyvault_model_endpoint_save_helper(endpoint, "endpoint-123", scope="user")
+ secret_reference = saved_endpoint["auth"]["api_key"]
+
+ assert secret_reference == "endpoint-123--model-endpoint--user--model-endpoint-api-key"
+ assert FakeSecretClient.stored_secrets[secret_reference] == "super-secret-key"
+
+ placeholder_endpoint = module.keyvault_model_endpoint_get_helper(
+ saved_endpoint,
+ "endpoint-123",
+ scope="user",
+ return_type=module.SecretReturnType.TRIGGER,
+ )
+ assert placeholder_endpoint["auth"]["api_key"] == module.ui_trigger_word
+
+ resolved_endpoint = module.keyvault_model_endpoint_get_helper(
+ saved_endpoint,
+ "endpoint-123",
+ scope="user",
+ return_type=module.SecretReturnType.VALUE,
+ )
+ assert resolved_endpoint["auth"]["api_key"] == "super-secret-key"
+
+ updated_endpoint = module.keyvault_model_endpoint_save_helper(
+ {
+ "id": "endpoint-123",
+ "name": "Primary Endpoint",
+ "auth": {
+ "type": "managed_identity",
+ "api_key": "",
+ },
+ },
+ "endpoint-123",
+ scope="user",
+ existing_endpoint=saved_endpoint,
+ )
+ assert updated_endpoint["auth"].get("api_key") is None
+
+ module.keyvault_model_endpoint_cleanup_helper(
+ saved_endpoint,
+ updated_endpoint,
+ "endpoint-123",
+ scope="user",
+ )
+ assert secret_reference not in FakeSecretClient.stored_secrets
+ assert secret_reference in FakeSecretClient.deleted_secrets
+
+ print("✅ Model endpoint Key Vault helper lifecycle passed.")
+ finally:
+ restore_modules(original_modules)
+
+
+def test_model_endpoint_frontend_contract_files():
+ """Ensure the frontend/backend contract includes endpoint IDs and stored-secret placeholders."""
+ print("🔍 Verifying model endpoint UI/backend stored-secret contract...")
+ admin_js_path = os.path.join(SINGLE_APP_ROOT, "static", "js", "admin", "admin_model_endpoints.js")
+ workspace_js_path = os.path.join(SINGLE_APP_ROOT, "static", "js", "workspace", "workspace_model_endpoints.js")
+ backend_path = os.path.join(SINGLE_APP_ROOT, "route_backend_models.py")
+
+ admin_js = read_file_text(admin_js_path)
+ workspace_js = read_file_text(workspace_js_path)
+ backend_content = read_file_text(backend_path)
+
+ assert 'const endpointId = endpointIdInput?.value.trim() || "";' in admin_js
+ assert 'const endpointId = endpointIdInput?.value.trim() || "";' in workspace_js
+ assert 'clientSecretInput.placeholder = "Stored"' in admin_js
+ assert 'apiKeyInput.placeholder = "Stored"' in admin_js
+ assert 'resolve_request_endpoint_payload' in backend_content
+ assert 'keyvault_model_endpoint_get_helper' in backend_content
+
+ print("✅ Model endpoint UI/backend stored-secret contract passed.")
+
+
+def run_tests():
+ tests = [
+ test_model_endpoint_key_vault_helper_lifecycle,
+ test_model_endpoint_frontend_contract_files,
+ ]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ sys.exit(0 if run_tests() else 1)
\ No newline at end of file
diff --git a/functional_tests/test_model_endpoints_save_button.py b/functional_tests/test_model_endpoints_save_button.py
new file mode 100644
index 00000000..71e8c79a
--- /dev/null
+++ b/functional_tests/test_model_endpoints_save_button.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+# test_model_endpoints_save_button.py
+"""
+Functional test for model endpoint Save button wiring.
+Version: 0.236.021
+Implemented in: 0.236.021
+
+This test ensures the Save Endpoint button triggers save logic and surfaces errors.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_model_endpoints_save_button():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+ content = read_file_text(js_path)
+
+ assert 'saveEndpoint()' in content, "Save endpoint handler missing."
+ assert 'event.preventDefault()' in content, "Save button should prevent default."
+ assert 'Failed to save endpoint' in content, "Expected error handling for save endpoint."
+
+ print("✅ Save Endpoint button handler verified.")
+
+
+if __name__ == "__main__":
+ test_model_endpoints_save_button()
diff --git a/functional_tests/test_model_endpoints_save_toast_message.py b/functional_tests/test_model_endpoints_save_toast_message.py
new file mode 100644
index 00000000..3e91b348
--- /dev/null
+++ b/functional_tests/test_model_endpoints_save_toast_message.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+# test_model_endpoints_save_toast_message.py
+"""
+Functional test for model endpoint save toast messaging.
+Version: 0.236.022
+Implemented in: 0.236.022
+
+This test ensures the save toast reminds admins to save settings to persist changes.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_model_endpoints_save_toast_message():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ js_path = os.path.join(repo_root, 'application', 'single_app', 'static', 'js', 'admin', 'admin_model_endpoints.js')
+ content = read_file_text(js_path)
+
+ assert "Please save your settings" in content, "Expected save toast reminder in endpoint save flow."
+
+ print("✅ Save toast reminder verified.")
+
+
+if __name__ == "__main__":
+ test_model_endpoints_save_toast_message()
diff --git a/functional_tests/test_msgraph_auth_consent_flow.py b/functional_tests/test_msgraph_auth_consent_flow.py
new file mode 100644
index 00000000..85e7d9f1
--- /dev/null
+++ b/functional_tests/test_msgraph_auth_consent_flow.py
@@ -0,0 +1,229 @@
+# test_msgraph_auth_consent_flow.py
+"""
+Functional test for Microsoft Graph incremental auth flow.
+Version: 0.239.175
+Implemented in: 0.239.175
+
+This test ensures plugin-requested Microsoft Graph scopes survive the OAuth
+callback and that interactive reauthentication does not force consent unless
+Microsoft Entra explicitly reports missing consent.
+"""
+
+from pathlib import Path
+import sys
+
+from flask import Flask, session
+
+
+ROOT = Path(__file__).resolve().parents[1]
+APP_DIR = ROOT / "application" / "single_app"
+
+if str(APP_DIR) not in sys.path:
+ sys.path.insert(0, str(APP_DIR))
+
+
+import functions_authentication as auth_module # noqa: E402
+import functions_settings # noqa: E402
+import route_frontend_authentication as route_module # noqa: E402
+
+
+class FakeTokenCache:
+ """Minimal MSAL token cache stub for callback tests."""
+
+ has_state_changed = False
+
+
+class FakeMsalApp:
+ """Simple MSAL stub that records interactive and callback requests."""
+
+ def __init__(self, silent_result=None, auth_code_result=None):
+ self.silent_result = silent_result
+ self.auth_code_result = auth_code_result or {
+ "id_token_claims": {
+ "name": "Test User",
+ "oid": "oid",
+ "sub": "sub",
+ },
+ "access_token": "fake-token",
+ }
+ self.authorization_requests = []
+ self.auth_code_requests = []
+ self.token_cache = FakeTokenCache()
+
+ def get_accounts(self, username=None):
+ return [{"home_account_id": "oid.tid", "username": username}]
+
+ def acquire_token_silent_with_error(self, scopes, account=None):
+ self.last_silent_request = {
+ "scopes": list(scopes),
+ "account": account,
+ }
+ return self.silent_result
+
+ def get_authorization_request_url(self, scopes, **kwargs):
+ self.authorization_requests.append(
+ {
+ "scopes": list(scopes),
+ **kwargs,
+ }
+ )
+ prompt_value = kwargs.get("prompt", "none")
+ return f"https://login.example.test/authorize?prompt={prompt_value}&scopes={','.join(scopes)}"
+
+ def acquire_token_by_authorization_code(self, code=None, scopes=None, redirect_uri=None):
+ self.auth_code_requests.append(
+ {
+ "code": code,
+ "scopes": list(scopes or []),
+ "redirect_uri": redirect_uri,
+ }
+ )
+ return self.auth_code_result
+
+
+def test_plugin_auth_uses_interactive_sign_in_without_forced_consent() -> bool:
+ """Verify a silent token miss stores scopes and does not force prompt=consent."""
+ print("Testing Microsoft Graph interactive auth without forced consent...")
+
+ app = Flask(__name__)
+ app.secret_key = "test-secret"
+ fake_msal_app = FakeMsalApp(silent_result=None)
+ original_builder = auth_module._build_msal_app
+
+ try:
+ auth_module._build_msal_app = lambda cache=None, authority_override=None: fake_msal_app
+
+ with app.test_request_context("/", base_url="https://example.test"):
+ session["user"] = {
+ "oid": "oid",
+ "tid": "tid",
+ "preferred_username": "user@example.com",
+ }
+
+ result = auth_module.get_valid_access_token_for_plugins(["MailboxSettings.Read"])
+
+ if result.get("error") != "interactive_auth_required":
+ print(f"Expected interactive_auth_required, got: {result}")
+ return False
+ if auth_module.get_requested_oauth_scopes() != ["MailboxSettings.Read"]:
+ print(f"Expected stored requested scopes, got: {auth_module.get_requested_oauth_scopes()}")
+ return False
+ if not fake_msal_app.authorization_requests:
+ print("Expected an interactive auth URL request to be generated")
+ return False
+ if fake_msal_app.authorization_requests[0].get("prompt") is not None:
+ print(f"Expected no forced prompt for silent token miss, got: {fake_msal_app.authorization_requests[0]}")
+ return False
+
+ print("Microsoft Graph interactive auth avoids forced consent when reauth is enough")
+ return True
+ finally:
+ auth_module._build_msal_app = original_builder
+
+
+def test_plugin_auth_only_forces_consent_when_entra_requires_it() -> bool:
+ """Verify explicit consent errors still request prompt=consent."""
+ print("Testing Microsoft Graph explicit consent handling...")
+
+ app = Flask(__name__)
+ app.secret_key = "test-secret"
+ fake_msal_app = FakeMsalApp(
+ silent_result={
+ "error": "invalid_grant",
+ "error_description": "AADSTS65001: consent_required",
+ }
+ )
+ original_builder = auth_module._build_msal_app
+
+ try:
+ auth_module._build_msal_app = lambda cache=None, authority_override=None: fake_msal_app
+
+ with app.test_request_context("/", base_url="https://example.test"):
+ session["user"] = {
+ "oid": "oid",
+ "tid": "tid",
+ "preferred_username": "user@example.com",
+ }
+
+ result = auth_module.get_valid_access_token_for_plugins(["Calendars.Read"])
+
+ if result.get("error") != "consent_required":
+ print(f"Expected consent_required, got: {result}")
+ return False
+ if fake_msal_app.authorization_requests[0].get("prompt") != "consent":
+ print(f"Expected prompt=consent for explicit consent errors, got: {fake_msal_app.authorization_requests[0]}")
+ return False
+
+ print("Microsoft Graph consent flow still forces consent when Entra requires it")
+ return True
+ finally:
+ auth_module._build_msal_app = original_builder
+
+
+def test_oauth_callback_redeems_requested_plugin_scopes() -> bool:
+ """Verify the OAuth callback uses the stored plugin scopes instead of base login scopes."""
+ print("Testing OAuth callback scope redemption for plugin auth...")
+
+ app = Flask(__name__)
+ app.secret_key = "test-secret"
+
+ @app.route("/")
+ def index():
+ return "ok"
+
+ route_module.register_route_frontend_authentication(app)
+
+ fake_msal_app = FakeMsalApp()
+ original_builder = route_module._build_msal_app
+ original_load_cache = route_module._load_cache
+ original_save_cache = route_module._save_cache
+ original_get_settings = functions_settings.get_settings
+
+ try:
+ route_module._build_msal_app = lambda cache=None: fake_msal_app
+ route_module._load_cache = lambda: None
+ route_module._save_cache = lambda cache: None
+ functions_settings.get_settings = lambda: {"enable_front_door": False}
+
+ with app.test_request_context("/getAToken?code=fake-code", base_url="https://example.test"):
+ session[auth_module.REQUESTED_SCOPES_SESSION_KEY] = ["MailboxSettings.Read"]
+
+ response = app.view_functions["authorized"]()
+
+ if response.status_code != 302:
+ print(f"Expected callback redirect, got status {response.status_code}")
+ return False
+ if not fake_msal_app.auth_code_requests:
+ print("Expected acquire_token_by_authorization_code to be called")
+ return False
+ if fake_msal_app.auth_code_requests[0].get("scopes") != ["MailboxSettings.Read"]:
+ print(f"Expected callback to redeem requested plugin scopes, got: {fake_msal_app.auth_code_requests[0]}")
+ return False
+ if auth_module.REQUESTED_SCOPES_SESSION_KEY in session:
+ print(f"Expected requested scope state to be cleared, got: {dict(session)}")
+ return False
+
+ print("OAuth callback redeems the requested plugin scopes and clears session state")
+ return True
+ finally:
+ route_module._build_msal_app = original_builder
+ route_module._load_cache = original_load_cache
+ route_module._save_cache = original_save_cache
+ functions_settings.get_settings = original_get_settings
+
+
+if __name__ == "__main__":
+ tests = [
+ test_plugin_auth_uses_interactive_sign_in_without_forced_consent,
+ test_plugin_auth_only_forces_consent_when_entra_requires_it,
+ test_oauth_callback_redeems_requested_plugin_scopes,
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\nRunning {test.__name__}...")
+ results.append(test())
+
+ success = all(results)
+ print(f"\nResults: {sum(results)}/{len(tests)} tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_msgraph_plugin_operations.py b/functional_tests/test_msgraph_plugin_operations.py
new file mode 100644
index 00000000..a8b32e47
--- /dev/null
+++ b/functional_tests/test_msgraph_plugin_operations.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# test_msgraph_plugin_operations.py
+"""
+Functional test for Microsoft Graph plugin operations.
+Version: 0.239.174
+Implemented in: 0.239.174
+
+This test ensures the Microsoft Graph plugin exposes the new high-value read
+operations, routes requests through the shared Graph helper, and handles
+pagination and token consent failures safely.
+"""
+
+from pathlib import Path
+import sys
+
+
+ROOT = Path(__file__).resolve().parents[1]
+APP_DIR = ROOT / "application" / "single_app"
+
+if str(APP_DIR) not in sys.path:
+ sys.path.insert(0, str(APP_DIR))
+
+
+from semantic_kernel_plugins.msgraph_plugin import MSGraphPlugin # noqa: E402
+import semantic_kernel_plugins.msgraph_plugin as msgraph_module # noqa: E402
+
+
+class FakeResponse:
+ def __init__(self, status_code, payload, headers=None, text=""):
+ self.status_code = status_code
+ self._payload = payload
+ self.headers = headers or {}
+ self.text = text
+
+ def json(self):
+ return self._payload
+
+
+def test_msgraph_plugin_exposes_expected_operations() -> bool:
+ """Verify the plugin metadata and registration expose the new operations."""
+ print("Testing Microsoft Graph plugin metadata and function registration...")
+
+ plugin = MSGraphPlugin({"name": "msgraph_plugin"})
+ expected_functions = {
+ "get_my_profile",
+ "get_my_timezone",
+ "get_my_events",
+ "get_my_messages",
+ "search_users",
+ "get_user_by_email",
+ "list_drive_items",
+ "get_my_security_alerts",
+ }
+
+ registered_functions = set(plugin.get_functions())
+ if expected_functions != registered_functions:
+ print(f"Unexpected function registration. Got: {sorted(registered_functions)}")
+ return False
+
+ method_names = {method["name"] for method in plugin.metadata.get("methods", [])}
+ missing_methods = expected_functions - method_names
+ if missing_methods:
+ print(f"Metadata missing methods: {sorted(missing_methods)}")
+ return False
+
+ print("Microsoft Graph plugin metadata includes the expected operations")
+ return True
+
+
+def test_msgraph_plugin_paginates_and_truncates_list_results() -> bool:
+ """Verify the shared Graph helper paginates list results up to the requested max."""
+ print("Testing Microsoft Graph helper pagination behavior...")
+
+ plugin = MSGraphPlugin({"name": "msgraph_plugin"})
+ original_token_helper = msgraph_module.get_valid_access_token_for_plugins
+ original_request = msgraph_module.requests.request
+
+ responses = [
+ FakeResponse(200, {"value": [{"id": "1"}, {"id": "2"}], "@odata.nextLink": "https://graph.microsoft.com/v1.0/me/messages?page=2"}),
+ FakeResponse(200, {"value": [{"id": "3"}, {"id": "4"}]})
+ ]
+ request_log = []
+
+ def fake_token_helper(scopes=None):
+ return {"access_token": "fake-token", "scopes": scopes}
+
+ def fake_request(method, url, headers=None, params=None, json=None, timeout=None):
+ request_log.append({
+ "method": method,
+ "url": url,
+ "headers": headers,
+ "params": params,
+ "timeout": timeout,
+ })
+ return responses.pop(0)
+
+ try:
+ msgraph_module.get_valid_access_token_for_plugins = fake_token_helper
+ msgraph_module.requests.request = fake_request
+
+ result = plugin.get_my_messages(top=3)
+
+ if result.get("count") != 3:
+ print(f"Expected 3 messages after pagination, got: {result}")
+ return False
+ if len(result.get("value", [])) != 3:
+ print(f"Expected truncated message list of length 3, got: {result.get('value', [])}")
+ return False
+ if not result.get("truncated"):
+ print(f"Expected truncated=True when additional page data remains, got: {result}")
+ return False
+ if len(request_log) != 2:
+ print(f"Expected 2 Graph requests, got {len(request_log)}")
+ return False
+
+ print("Microsoft Graph helper paginates and truncates list results safely")
+ return True
+ finally:
+ msgraph_module.get_valid_access_token_for_plugins = original_token_helper
+ msgraph_module.requests.request = original_request
+
+
+def test_msgraph_plugin_reads_mailbox_timezone() -> bool:
+ """Verify the plugin can read mailbox timezone settings for the signed-in user."""
+ print("Testing Microsoft Graph mailbox timezone lookup...")
+
+ plugin = MSGraphPlugin({"name": "msgraph_plugin"})
+ original_token_helper = msgraph_module.get_valid_access_token_for_plugins
+ original_request = msgraph_module.requests.request
+
+ request_log = []
+
+ def fake_token_helper(scopes=None):
+ request_log.append({"scopes": scopes})
+ return {"access_token": "fake-token", "scopes": scopes}
+
+ def fake_request(method, url, headers=None, params=None, json=None, timeout=None):
+ request_log.append({
+ "method": method,
+ "url": url,
+ "headers": headers,
+ "params": params,
+ "timeout": timeout,
+ })
+ return FakeResponse(
+ 200,
+ {
+ "timeZone": "Pacific Standard Time",
+ "dateFormat": "M/d/yyyy",
+ "timeFormat": "h:mm tt",
+ "language": {"locale": "en-US", "displayName": "English (United States)"},
+ "workingHours": {"timeZone": {"name": "Pacific Standard Time"}},
+ },
+ )
+
+ try:
+ msgraph_module.get_valid_access_token_for_plugins = fake_token_helper
+ msgraph_module.requests.request = fake_request
+
+ result = plugin.get_my_timezone()
+ if result.get("time_zone") != "Pacific Standard Time":
+ print(f"Expected Pacific Standard Time, got: {result}")
+ return False
+ if result.get("time_format") != "h:mm tt":
+ print(f"Expected mailbox time format in result, got: {result}")
+ return False
+ if request_log[0].get("scopes") != ["MailboxSettings.Read"]:
+ print(f"Expected MailboxSettings.Read scope, got: {request_log[0]}")
+ return False
+ if not str(request_log[1].get("url", "")).endswith("/v1.0/me/mailboxSettings"):
+ print(f"Expected mailboxSettings request URL, got: {request_log[1]}")
+ return False
+
+ print("Microsoft Graph plugin reads mailbox timezone settings safely")
+ return True
+ finally:
+ msgraph_module.get_valid_access_token_for_plugins = original_token_helper
+ msgraph_module.requests.request = original_request
+
+
+def test_msgraph_plugin_surfaces_token_consent_errors() -> bool:
+ """Verify token acquisition failures are returned as structured plugin errors."""
+ print("Testing Microsoft Graph token consent error handling...")
+
+ plugin = MSGraphPlugin({"name": "msgraph_plugin"})
+ original_token_helper = msgraph_module.get_valid_access_token_for_plugins
+ original_request = msgraph_module.requests.request
+
+ def fake_token_helper(scopes=None):
+ return {
+ "error": "consent_required",
+ "message": "Consent is required.",
+ "consent_url": "https://example.test/consent",
+ "scopes": scopes,
+ }
+
+ def fail_if_called(*args, **kwargs):
+ raise AssertionError("requests.request should not be called when token acquisition fails")
+
+ try:
+ msgraph_module.get_valid_access_token_for_plugins = fake_token_helper
+ msgraph_module.requests.request = fail_if_called
+
+ result = plugin.search_users("Ada")
+ if result.get("error") != "consent_required":
+ print(f"Expected consent_required result, got: {result}")
+ return False
+ if result.get("operation") != "search_users":
+ print(f"Expected operation name in token error result, got: {result}")
+ return False
+
+ print("Microsoft Graph plugin surfaces consent requirements safely")
+ return True
+ finally:
+ msgraph_module.get_valid_access_token_for_plugins = original_token_helper
+ msgraph_module.requests.request = original_request
+
+
+if __name__ == "__main__":
+ tests = [
+ test_msgraph_plugin_exposes_expected_operations,
+ test_msgraph_plugin_paginates_and_truncates_list_results,
+ test_msgraph_plugin_reads_mailbox_timezone,
+ test_msgraph_plugin_surfaces_token_consent_errors,
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\nRunning {test.__name__}...")
+ results.append(test())
+
+ success = all(results)
+ print(f"\nResults: {sum(results)}/{len(tests)} tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_multi_endpoint_migration_auth_preserved.py b/functional_tests/test_multi_endpoint_migration_auth_preserved.py
new file mode 100644
index 00000000..dc2ea591
--- /dev/null
+++ b/functional_tests/test_multi_endpoint_migration_auth_preserved.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# test_multi_endpoint_migration_auth_preserved.py
+"""
+Functional test for multi-endpoint migration auth preservation.
+Version: 0.236.016
+Implemented in: 0.236.016
+
+This test ensures the migration preserves authentication type, API key,
+subscription ID, and resource group for Azure OpenAI endpoints.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file:
+ return file.read()
+
+
+def test_multi_endpoint_migration_auth_preserved():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+ route_path = os.path.join(repo_root, 'application', 'single_app', 'route_frontend_admin_settings.py')
+ content = read_file_text(route_path)
+
+ assert "legacy_auth_type" in content, "Expected legacy auth type mapping for migration."
+ assert "migrated_auth_type" in content, "Expected migrated auth type mapping for migration."
+ assert "azure_openai_gpt_key" in content, "Expected GPT API key migration."
+ assert "azure_openai_gpt_subscription_id" in content, "Expected subscription ID migration."
+ assert "azure_openai_gpt_resource_group" in content, "Expected resource group migration."
+
+ print("✅ Migration preserves auth type, API key, and subscription data.")
+
+
+if __name__ == "__main__":
+ test_multi_endpoint_migration_auth_preserved()
diff --git a/functional_tests/test_new_conversation_agent_model_refresh.py b/functional_tests/test_new_conversation_agent_model_refresh.py
new file mode 100644
index 00000000..7ff49706
--- /dev/null
+++ b/functional_tests/test_new_conversation_agent_model_refresh.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+"""
+Functional test for new conversation agent/model refresh.
+Version: 0.236.066
+Implemented in: 0.236.066
+
+This test ensures new conversations refresh both agent and model lists and
+use activeGroupOid from user settings when loading group agents.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_file(path):
+ with open(path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def _assert_contains(text, snippet, file_label):
+ if snippet not in text:
+ raise AssertionError(f"Missing '{snippet}' in {file_label}")
+
+
+def test_new_conversation_agent_model_refresh():
+ """Verify refresh logic and activeGroupOid usage exist in UI code."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+
+ chat_agents_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-agents.js",
+ )
+ chat_retry_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-retry.js",
+ )
+ chat_conversations_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-conversations.js",
+ )
+
+ chat_agents_js = _read_file(chat_agents_path)
+ chat_retry_js = _read_file(chat_retry_path)
+ chat_conversations_js = _read_file(chat_conversations_path)
+
+ _assert_contains(chat_agents_js, "activeGroupOid", "chat-agents.js")
+ _assert_contains(chat_agents_js, "fetchGroupAgentsForActiveGroup(activeGroupId)", "chat-agents.js")
+ _assert_contains(chat_retry_js, "activeGroupOid", "chat-retry.js")
+ _assert_contains(chat_conversations_js, "refreshModelSelection", "chat-conversations.js")
+
+ print("✅ New conversation agent/model refresh logic verified")
+ return True
+
+
+if __name__ == "__main__":
+ success = test_new_conversation_agent_model_refresh()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_new_foundry_endpoint_api_version_handling.py b/functional_tests/test_new_foundry_endpoint_api_version_handling.py
new file mode 100644
index 00000000..8a87e5f3
--- /dev/null
+++ b/functional_tests/test_new_foundry_endpoint_api_version_handling.py
@@ -0,0 +1,62 @@
+# test_new_foundry_endpoint_api_version_handling.py
+#!/usr/bin/env python3
+"""
+Functional test for New Foundry endpoint API version handling.
+Version: 0.239.180
+Implemented in: 0.239.180
+
+This test ensures that New Foundry endpoints do not silently inherit the
+generic Azure OpenAI API-version default and that existing agents preserve
+their stored Responses API version instead of being overwritten by endpoint
+fallback values.
+"""
+
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def test_new_foundry_endpoint_api_version_handling() -> None:
+ print("Testing New Foundry endpoint API version handling...")
+
+ endpoint_js = ROOT / "application" / "single_app" / "static" / "js" / "admin" / "admin_model_endpoints.js"
+ agent_modal_js = ROOT / "application" / "single_app" / "static" / "js" / "agent_modal_stepper.js"
+ loader_py = ROOT / "application" / "single_app" / "semantic_kernel_loader.py"
+ endpoint_modal = ROOT / "application" / "single_app" / "templates" / "_multiendpoint_modal.html"
+ config_file = ROOT / "application" / "single_app" / "config.py"
+
+ assert_contains(endpoint_js, 'const DEFAULT_AOAI_OPENAI_API_VERSION = "2024-05-01-preview";')
+ assert_contains(endpoint_js, 'return provider === "new_foundry" ? "" : DEFAULT_AOAI_OPENAI_API_VERSION;')
+ assert_contains(endpoint_js, 'if (provider === "new_foundry") {')
+ assert_contains(endpoint_js, 'endpointOpenAiApiVersionInput.value = "";')
+ assert_contains(agent_modal_js, "const fetchedResponsesApiVersion = payload.responses_api_version || '';")
+ assert_contains(agent_modal_js, 'const preserveCurrentSelection = this.shouldPreserveCurrentFoundrySelection(endpointId);')
+ assert_contains(agent_modal_js, "const storedResponsesApiVersion = currentFoundrySettings.responses_api_version || '';")
+ assert_contains(loader_py, 'stored_responses_api_version = (')
+ assert_contains(loader_py, 'or agent.get("azure_openai_gpt_api_version")')
+ assert_contains(agent_modal_js, "if (responsesApiVersionInput && selected.responses_api_version) {")
+ assert_contains(endpoint_modal, 'No default is applied for New Foundry')
+ assert_contains(config_file, 'VERSION = "0.239.180"')
+
+ print("✅ New Foundry endpoint API version handling verified.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_new_foundry_endpoint_api_version_handling()
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_new_foundry_fetch_support.py b/functional_tests/test_new_foundry_fetch_support.py
new file mode 100644
index 00000000..d978a67d
--- /dev/null
+++ b/functional_tests/test_new_foundry_fetch_support.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# test_new_foundry_fetch_support.py
+"""
+Functional test for new Foundry fetch support.
+Version: 0.239.180
+Implemented in: 0.239.180
+
+This test ensures that the backend exposes a new_foundry fetch path and that
+the agent modal can fetch and apply New Foundry application metadata.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_new_foundry_fetch_support_present():
+ """Validate New Foundry fetch plumbing across backend and modal files."""
+ print("🔍 Testing New Foundry fetch support...")
+
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ models_path = os.path.join(repo_root, "application", "single_app", "route_backend_models.py")
+ modal_js_path = os.path.join(repo_root, "application", "single_app", "static", "js", "agent_modal_stepper.js")
+ modal_html_path = os.path.join(repo_root, "application", "single_app", "templates", "_agent_modal.html")
+
+ models_content = read_file_text(models_path)
+ modal_js_content = read_file_text(modal_js_path)
+ modal_html_content = read_file_text(modal_html_path)
+
+ required_snippets = [
+ "list_new_foundry_agents_from_endpoint",
+ 'if provider == "new_foundry":',
+ 'agents = list_new_foundry_agents_from_endpoint(foundry_settings, get_settings())',
+ 'id="agent-type-new-foundry" value="new_foundry"',
+ "foundryFetchBtnLabel.textContent = isNewFoundry ? 'Fetch Applications' : 'Fetch Agents';",
+ "const versionSuffix = agent.application_version ? ` (v${agent.application_version})` : '';",
+ '"responses_api_version": responses_api_version,',
+ "const fetchedResponsesApiVersion = payload.responses_api_version || '';",
+ "return normalizedProvider === this.getCurrentFoundryProvider();",
+ "applicationIdInput.value = selected.application_id || selected.id || '';",
+ 'id="agent-foundry-fetch-btn-label"',
+ 'id="agent-foundry-select-label"',
+ ]
+
+ combined = "\n".join([models_content, modal_js_content, modal_html_content])
+ missing = [snippet for snippet in required_snippets if snippet not in combined]
+ if missing:
+ raise AssertionError(f"Missing expected New Foundry fetch snippets: {', '.join(missing)}")
+
+ print("✅ New Foundry fetch support verified.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_new_foundry_fetch_support_present()
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_new_foundry_streaming_runtime.py b/functional_tests/test_new_foundry_streaming_runtime.py
new file mode 100644
index 00000000..0436a378
--- /dev/null
+++ b/functional_tests/test_new_foundry_streaming_runtime.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+# test_new_foundry_streaming_runtime.py
+"""
+Functional test for new Foundry REST streaming runtime.
+Version: 0.239.177
+Implemented in: 0.239.177
+
+This test ensures that new Foundry application discovery stays REST-based,
+that the runtime exposes a streaming executor, and that the chat stream route
+emits agent deltas as they arrive instead of buffering them first.
+"""
+
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def assert_not_contains(file_path: Path, forbidden: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if forbidden in content:
+ raise AssertionError(f"Did not expect to find {forbidden!r} in {file_path}")
+
+
+def test_new_foundry_streaming_runtime() -> None:
+ print("Testing new Foundry REST streaming runtime...")
+
+ runtime_path = ROOT / "application" / "single_app" / "foundry_agent_runtime.py"
+ chats_path = ROOT / "application" / "single_app" / "route_backend_chats.py"
+ models_path = ROOT / "application" / "single_app" / "route_backend_models.py"
+ config_path = ROOT / "application" / "single_app" / "config.py"
+
+ assert_contains(runtime_path, "async def execute_new_foundry_agent_stream(")
+ assert_contains(runtime_path, '"stream": stream')
+ assert_contains(runtime_path, "stream=True,")
+ assert_contains(runtime_path, "def _iter_sse_events(response: requests.Response):")
+ assert_contains(runtime_path, "yield FoundryAgentStreamMessage(content=delta_text)")
+ assert_not_contains(runtime_path, "from azure.ai.projects import AIProjectClient")
+
+ assert_contains(models_path, 'agents = list_new_foundry_agents_from_endpoint(foundry_settings, get_settings())')
+
+ assert_contains(chats_path, "agent_stream = selected_agent.invoke_stream(messages=agent_message_history)")
+ assert_contains(chats_path, "response = loop.run_until_complete(agent_stream.__anext__())")
+ assert_not_contains(chats_path, "chunks, stream_usage = loop.run_until_complete(stream_agent_async())")
+
+ assert_contains(config_path, 'VERSION = "0.239.177"')
+
+ print("✅ New Foundry REST streaming runtime verified.")
+
+
+if __name__ == "__main__":
+ try:
+ test_new_foundry_streaming_runtime()
+ success = True
+ except Exception as exc:
+ print(f"Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_new_foundry_ui_visibility.py b/functional_tests/test_new_foundry_ui_visibility.py
new file mode 100644
index 00000000..80da0b83
--- /dev/null
+++ b/functional_tests/test_new_foundry_ui_visibility.py
@@ -0,0 +1,60 @@
+# test_new_foundry_ui_visibility.py
+#!/usr/bin/env python3
+"""
+Functional test for New Foundry UI visibility.
+Version: 0.239.180
+Implemented in: 0.239.180
+
+This test ensures that New Foundry is exposed again in the agent modal,
+the endpoint modal, and frontend endpoint sanitization so the browser UI can
+configure and select New Foundry resources.
+"""
+
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def assert_not_contains(file_path: Path, forbidden: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if forbidden in content:
+ raise AssertionError(f"Did not expect to find {forbidden!r} in {file_path}")
+
+
+def test_new_foundry_ui_visibility() -> None:
+ print("Testing New Foundry UI visibility...")
+
+ agent_modal = ROOT / "application" / "single_app" / "templates" / "_agent_modal.html"
+ endpoint_modal = ROOT / "application" / "single_app" / "templates" / "_multiendpoint_modal.html"
+ settings_file = ROOT / "application" / "single_app" / "functions_settings.py"
+ config_file = ROOT / "application" / "single_app" / "config.py"
+
+ assert_contains(agent_modal, 'id="agent-type-new-foundry"')
+ assert_not_contains(agent_modal, '{% if false %}')
+
+ assert_contains(endpoint_modal, '')
+ assert_contains(settings_file, 'return normalized_provider in {"aoai", "aifoundry", "new_foundry"}')
+ assert_contains(config_file, 'VERSION = "0.239.180"')
+
+ print("✅ New Foundry UI visibility verified.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_new_foundry_ui_visibility()
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_new_foundry_version_metadata.py b/functional_tests/test_new_foundry_version_metadata.py
new file mode 100644
index 00000000..ab89a67e
--- /dev/null
+++ b/functional_tests/test_new_foundry_version_metadata.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+# test_new_foundry_version_metadata.py
+"""
+Functional test for New Foundry version metadata handling.
+Version: 0.239.180
+Implemented in: 0.239.180
+
+This test ensures that published New Foundry application versions are read from
+the fetched payload, shown in the selector label, and no longer require manual
+entry in the agent modal.
+"""
+
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def assert_not_contains(file_path: Path, forbidden: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if forbidden in content:
+ raise AssertionError(f"Did not expect to find {forbidden!r} in {file_path}")
+
+
+def test_new_foundry_version_metadata() -> None:
+ print("Testing New Foundry version metadata handling...")
+
+ runtime_path = ROOT / "application" / "single_app" / "foundry_agent_runtime.py"
+ modal_js_path = ROOT / "application" / "single_app" / "static" / "js" / "agent_modal_stepper.js"
+ modal_html_path = ROOT / "application" / "single_app" / "templates" / "_agent_modal.html"
+ config_path = ROOT / "application" / "single_app" / "config.py"
+
+ assert_contains(runtime_path, '_extract_nested_version_value(item.get("versions"))')
+ assert_contains(runtime_path, '_extract_nested_version_value(properties.get("versions"))')
+ assert_contains(modal_js_path, 'const versionSuffix = agent.application_version ? ` (v${agent.application_version})` : \'\';')
+ assert_contains(modal_html_path, 'id="agent-new-foundry-application-version"')
+ assert_contains(modal_html_path, 'type="hidden" id="agent-new-foundry-application-version"')
+ assert_not_contains(modal_html_path, '')
+ assert_contains(config_path, 'VERSION = "0.239.180"')
+
+ print("✅ New Foundry version metadata handling verified.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_new_foundry_version_metadata()
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_pending_thought_message_scoping.py b/functional_tests/test_pending_thought_message_scoping.py
new file mode 100644
index 00000000..8865a185
--- /dev/null
+++ b/functional_tests/test_pending_thought_message_scoping.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+# test_pending_thought_message_scoping.py
+"""
+Functional test for pending thought message scoping.
+Version: 0.239.185
+Implemented in: 0.239.185
+
+This test ensures the pending-thought polling path can be scoped to the active
+assistant message so reconnect and fallback flows do not read thoughts from a
+different in-flight reply.
+"""
+
+import os
+import sys
+
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+THOUGHTS_FILE = os.path.join(
+ ROOT_DIR,
+ 'application', 'single_app', 'functions_thoughts.py'
+)
+ROUTE_FILE = os.path.join(
+ ROOT_DIR,
+ 'application', 'single_app', 'route_backend_thoughts.py'
+)
+CLIENT_FILE = os.path.join(
+ ROOT_DIR,
+ 'application', 'single_app', 'static', 'js', 'chat', 'chat-thoughts.js'
+)
+
+
+def read_file_content(file_path):
+ with open(file_path, 'r', encoding='utf-8') as file_handle:
+ return file_handle.read()
+
+
+def test_pending_thoughts_support_optional_message_id_scope():
+ """Verify the backend query path supports an explicit message scope."""
+ print('🔍 Testing pending thought backend message scoping...')
+
+ thoughts_content = read_file_content(THOUGHTS_FILE)
+
+ checks = {
+ 'optional message_id parameter': 'def get_pending_thoughts(conversation_id, user_id, message_id=None):' in thoughts_content,
+ 'query supports message filter': 'AND c.message_id = @msg_id ' in thoughts_content,
+ 'message filter parameter append': 'params.append({"name": "@msg_id", "value": message_id})' in thoughts_content,
+ 'message scoped branch': 'if message_id:' in thoughts_content,
+ 'legacy fallback branch preserved': 'latest_message_id = results[0].get(\'message_id\')' in thoughts_content,
+ }
+
+ all_passed = True
+ for name, passed in checks.items():
+ status = 'PASS' if passed else 'FAIL'
+ print(f' [{status}] {name}')
+ if not passed:
+ all_passed = False
+
+ assert all_passed
+
+
+def test_pending_thought_route_forwards_query_message_id():
+ """Verify the route reads message_id from the query string and returns it."""
+ print('\n🔍 Testing pending thought route query forwarding...')
+
+ route_content = read_file_content(ROUTE_FILE)
+
+ checks = {
+ 'route reads query string message_id': "message_id = request.args.get('message_id')" in route_content,
+ 'route forwards scoped query': 'get_pending_thoughts(conversation_id, user_id, message_id=message_id)' in route_content,
+ 'sanitized response includes message_id': "'message_id': t.get('message_id')" in route_content,
+ }
+
+ all_passed = True
+ for name, passed in checks.items():
+ status = 'PASS' if passed else 'FAIL'
+ print(f' [{status}] {name}')
+ if not passed:
+ all_passed = False
+
+ assert all_passed
+
+
+def test_pending_thought_client_builds_scoped_request_urls():
+ """Verify the browser helper can request pending thoughts for one message."""
+ print('\n🔍 Testing pending thought client URL construction...')
+
+ client_content = read_file_content(CLIENT_FILE)
+
+ checks = {
+ 'pending url helper exists': 'function buildPendingThoughtsUrl(conversationId, messageId = null)' in client_content,
+ 'query param name is message_id': "queryParams.set('message_id', messageId);" in client_content,
+ 'startThoughtPolling accepts message id': 'export function startThoughtPolling(conversationId, messageId = null)' in client_content,
+ 'startStreamingThoughtPolling accepts message id': 'export function startStreamingThoughtPolling(conversationId, messageId = null)' in client_content,
+ }
+
+ all_passed = True
+ for name, passed in checks.items():
+ status = 'PASS' if passed else 'FAIL'
+ print(f' [{status}] {name}')
+ if not passed:
+ all_passed = False
+
+ assert all_passed
+
+
+if __name__ == '__main__':
+ tests = [
+ test_pending_thoughts_support_optional_message_id_scope,
+ test_pending_thought_route_forwards_query_message_id,
+ test_pending_thought_client_builds_scoped_request_urls,
+ ]
+
+ success = True
+ passed = 0
+ total = len(tests)
+
+ for test in tests:
+ print(f'\n🧪 Running {test.__name__}...')
+ try:
+ test()
+ passed += 1
+ except Exception:
+ success = False
+ import traceback
+
+ traceback.print_exc()
+
+ print(f'\n📊 Results: {passed}/{total} tests passed')
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_personal_agent_dropdown_in_chats_fix.py b/functional_tests/test_personal_agent_dropdown_in_chats_fix.py
new file mode 100644
index 00000000..df5175a0
--- /dev/null
+++ b/functional_tests/test_personal_agent_dropdown_in_chats_fix.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""
+Functional test for personal agent dropdown visibility in chats.
+Version: 0.236.061
+Implemented in: 0.236.061
+
+This test ensures that chat agent dropdown logic treats invalid active group IDs
+("None", "null", "undefined") as empty so personal agents are included.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_file(path):
+ with open(path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def _assert_normalization_guard(js_text, file_label):
+ required_snippets = [
+ "rawGroupId",
+ "activeGroupId",
+ "['none', 'null', 'undefined']",
+ ]
+ missing = [snippet for snippet in required_snippets if snippet not in js_text]
+ if missing:
+ raise AssertionError(
+ f"Missing normalization guard in {file_label}: {', '.join(missing)}"
+ )
+
+
+def test_personal_agent_dropdown_guard():
+ """Verify chat dropdown guards against bogus active group ids."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ chat_agents_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-agents.js",
+ )
+ chat_retry_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-retry.js",
+ )
+
+ chat_agents_js = _read_file(chat_agents_path)
+ chat_retry_js = _read_file(chat_retry_path)
+
+ _assert_normalization_guard(chat_agents_js, "chat-agents.js")
+ _assert_normalization_guard(chat_retry_js, "chat-retry.js")
+
+ print("✅ Personal agent dropdown guard is present in chat JS files")
+ return True
+
+
+if __name__ == "__main__":
+ success = test_personal_agent_dropdown_guard()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_personal_agent_dropdown_scope_fix.py b/functional_tests/test_personal_agent_dropdown_scope_fix.py
new file mode 100644
index 00000000..dc0ec81d
--- /dev/null
+++ b/functional_tests/test_personal_agent_dropdown_scope_fix.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""
+Functional test for personal agent dropdown scope in chats.
+Version: 0.236.062
+Implemented in: 0.236.062
+
+This test ensures the chat agent dropdown only treats group agents as active
+when the group chat tab is explicitly selected.
+"""
+
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+
+def _read_file(path):
+ with open(path, "r", encoding="utf-8") as handle:
+ return handle.read()
+
+
+def _assert_group_chat_guard(js_text, file_label):
+ required_snippets = [
+ "activeChatTabType",
+ "isGroupChat",
+ "activeGroupId",
+ ]
+ missing = [snippet for snippet in required_snippets if snippet not in js_text]
+ if missing:
+ raise AssertionError(
+ f"Missing group chat scope guard in {file_label}: {', '.join(missing)}"
+ )
+
+
+def test_personal_agent_dropdown_scope_guard():
+ """Verify agent dropdown uses explicit group chat tab selection."""
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ chat_agents_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-agents.js",
+ )
+ chat_retry_path = os.path.join(
+ repo_root,
+ "application",
+ "single_app",
+ "static",
+ "js",
+ "chat",
+ "chat-retry.js",
+ )
+
+ chat_agents_js = _read_file(chat_agents_path)
+ chat_retry_js = _read_file(chat_retry_path)
+
+ _assert_group_chat_guard(chat_agents_js, "chat-agents.js")
+ _assert_group_chat_guard(chat_retry_js, "chat-retry.js")
+
+ print("✅ Agent dropdown scope guard present for group chat only")
+ return True
+
+
+if __name__ == "__main__":
+ success = test_personal_agent_dropdown_scope_guard()
+ sys.exit(0 if success else 1)
diff --git a/functional_tests/test_personal_agent_user_id_saved.py b/functional_tests/test_personal_agent_user_id_saved.py
new file mode 100644
index 00000000..2a76d36b
--- /dev/null
+++ b/functional_tests/test_personal_agent_user_id_saved.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# test_personal_agent_user_id_saved.py
+"""
+Functional test for personal agent user_id persistence.
+Version: 0.236.050
+Implemented in: 0.236.050
+
+This test ensures that saving a personal agent assigns user_id to the persisted payload.
+"""
+
+import os
+
+
+def read_file_text(file_path):
+ with open(file_path, "r", encoding="utf-8") as file:
+ return file.read()
+
+
+def test_personal_agent_user_id_saved():
+ repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ file_path = os.path.join(repo_root, "application", "single_app", "functions_personal_agents.py")
+ content = read_file_text(file_path)
+
+ assert "agent_data['user_id'] = user_id" in content, "Expected user_id to be set on persisted agent payload."
+ assert "agent_data['last_updated']" in content, "Expected last_updated to be set on persisted agent payload."
+
+ print("✅ Personal agent save user_id persistence verified.")
+
+
+def run_tests():
+ tests = [test_personal_agent_user_id_saved]
+ results = []
+
+ for test in tests:
+ print(f"\n🧪 Running {test.__name__}...")
+ try:
+ test()
+ print("✅ Test passed")
+ results.append(True)
+ except Exception as exc:
+ print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ results.append(False)
+
+ success = all(results)
+ print(f"\n📊 Results: {sum(results)}/{len(results)} tests passed")
+ return success
+
+
+if __name__ == "__main__":
+ raise SystemExit(0 if run_tests() else 1)
diff --git a/functional_tests/test_profile_dashboard.py b/functional_tests/test_profile_dashboard.py
index 1fa05f5d..f85d8baa 100644
--- a/functional_tests/test_profile_dashboard.py
+++ b/functional_tests/test_profile_dashboard.py
@@ -40,7 +40,7 @@ def test_profile_endpoints():
item=test_user_id,
partition_key=test_user_id
)
- except:
+ except Exception as ex:
# Create test user settings
user_doc = {
'id': test_user_id,
@@ -316,7 +316,7 @@ def test_profile_endpoints():
item=activity['id'],
partition_key=activity['user_id']
)
- except:
+ except Exception as ex:
pass
# Optionally delete test user (uncomment if needed)
@@ -325,7 +325,7 @@ def test_profile_endpoints():
# item=test_user_id,
# partition_key=test_user_id
# )
- # except:
+ # except Exception as ex:
# pass
print(" ✅ Test data cleaned up successfully")
diff --git a/functional_tests/test_streaming_only_chat_path.py b/functional_tests/test_streaming_only_chat_path.py
index 540e08a6..053fe747 100644
--- a/functional_tests/test_streaming_only_chat_path.py
+++ b/functional_tests/test_streaming_only_chat_path.py
@@ -2,7 +2,7 @@
#!/usr/bin/env python3
"""
Functional test for streaming-only chat path migration.
-Version: 0.239.137
+Version: 0.239.185
Implemented in: 0.239.137
This test ensures that first-party chat clients use the streaming chat path,
@@ -30,7 +30,7 @@ def assert_not_contains(file_path: Path, forbidden: str) -> None:
raise AssertionError(f"Did not expect to find {forbidden!r} in {file_path}")
-def test_streaming_only_chat_path() -> bool:
+def test_streaming_only_chat_path() -> None:
print("Testing streaming-only chat path migration...")
chat_messages = ROOT / "application" / "single_app" / "static" / "js" / "chat" / "chat-messages.js"
@@ -62,17 +62,17 @@ def test_streaming_only_chat_path() -> bool:
assert_not_contains(chats_template, "streaming-toggle-btn")
assert_not_contains(chats_template, "streaming-badge")
- assert_contains(route_backend_chats, "return build_background_stream_response(generate_compatibility_response)")
- assert_contains(route_backend_chats, "return build_background_stream_response(generate)")
- assert_contains(config_file, 'VERSION = "0.239.137"')
+ assert_contains(route_backend_chats, "return build_background_stream_response(generate_compatibility_response, stream_session=stream_session)")
+ assert_contains(route_backend_chats, "return build_background_stream_response(generate, stream_session=stream_session)")
+ assert_contains(config_file, 'VERSION = "0.239.185"')
print("Streaming-only chat path checks passed!")
- return True
if __name__ == "__main__":
try:
- success = test_streaming_only_chat_path()
+ test_streaming_only_chat_path()
+ success = True
except Exception as exc:
print(f"Test failed: {exc}")
import traceback
diff --git a/functional_tests/test_streaming_thought_finalization.py b/functional_tests/test_streaming_thought_finalization.py
index 27acfa73..d6f7a359 100644
--- a/functional_tests/test_streaming_thought_finalization.py
+++ b/functional_tests/test_streaming_thought_finalization.py
@@ -1,12 +1,13 @@
#!/usr/bin/env python3
# test_streaming_thought_finalization.py
"""
-Functional test for streaming thought finalization fix.
-Version: 0.239.116
-Implemented in: 0.239.116
+Functional test for streaming thought rendering and finalization fixes.
+Version: 0.239.185
+Implemented in: 0.239.185
This test ensures the streaming client buffers split SSE events and prevents
-late thought updates from replacing already-streamed assistant content.
+late or stale thought updates from replacing already-streamed assistant content
+or leaking across consecutive streaming responses.
"""
import os
@@ -22,6 +23,14 @@
ROOT_DIR,
'application', 'single_app', 'static', 'js', 'chat', 'chat-thoughts.js'
)
+PLUGIN_THOUGHTS_FILE = os.path.join(
+ ROOT_DIR,
+ 'application', 'single_app', 'semantic_kernel_plugins', 'plugin_invocation_thoughts.py'
+)
+ROUTE_BACKEND_CHATS_FILE = os.path.join(
+ ROOT_DIR,
+ 'application', 'single_app', 'route_backend_chats.py'
+)
def read_file_content(file_path):
@@ -53,11 +62,11 @@ def test_streaming_parser_buffers_split_sse_events():
if not passed:
all_passed = False
- return all_passed
+ assert all_passed
except Exception as exc:
print(f' [FAIL] Exception: {exc}')
- return False
+ raise
def test_late_thoughts_do_not_replace_streamed_content():
@@ -69,7 +78,8 @@ def test_late_thoughts_do_not_replace_streamed_content():
thoughts_content = read_file_content(THOUGHTS_FILE)
checks = {
- 'content-start tracking': "messageElement.dataset.streamingHasContent = 'true';" in streaming_content,
+ 'content-start helper import': 'markStreamingThoughtContentStarted' in streaming_content,
+ 'content-start helper call': 'markStreamingThoughtContentStarted(messageId);' in streaming_content,
'thought guard before render': "if (!hasStreamedContent && !streamCompleted) {" in streaming_content,
'thought module data guard': "if (messageElement.dataset.streamingHasContent === 'true') {" in thoughts_content,
'thought module early return': 'return;' in thoughts_content,
@@ -82,27 +92,100 @@ def test_late_thoughts_do_not_replace_streamed_content():
if not passed:
all_passed = False
- return all_passed
+ assert all_passed
except Exception as exc:
print(f' [FAIL] Exception: {exc}')
- return False
+ raise
+
+
+def test_streaming_thoughts_are_scoped_to_the_active_message():
+ """Verify streaming thoughts target the current placeholder and include message identity."""
+ print('\n🔍 Testing streaming thought message scoping...')
+
+ try:
+ streaming_content = read_file_content(STREAMING_FILE)
+ thoughts_content = read_file_content(THOUGHTS_FILE)
+ backend_content = read_file_content(ROUTE_BACKEND_CHATS_FILE)
+
+ checks = {
+ 'streaming session starts with placeholder id': 'beginStreamingThoughtSession(tempAiMessageId);' in streaming_content,
+ 'sse thought handler receives placeholder id': 'handleStreamingThought(data, tempAiMessageId);' in streaming_content,
+ 'streaming path no longer starts pending-thought polling': 'startStreamingThoughtPolling(thoughtConversationId);' not in streaming_content,
+ 'thought renderer uses message helper': 'function getStreamingMessageElement(messageId)' in thoughts_content,
+ 'session reset helper': 'function resetStreamingPlaceholderState(messageElement)' in thoughts_content,
+ 'thought renderer tracks active backend message id': 'activeStreamingServerMessageId' in thoughts_content,
+ 'thought renderer ignores mismatched message ids': 'activeStreamingServerMessageId !== thoughtData.message_id' in thoughts_content,
+ 'thought renderer tracks dedupe signature': 'streamingThoughtSignature' in thoughts_content,
+ 'backend thought sse includes message id': "'message_id': assistant_message_id" in backend_content,
+ }
+
+ all_passed = True
+ for name, passed in checks.items():
+ status = 'PASS' if passed else 'FAIL'
+ print(f' [{status}] {name}')
+ if not passed:
+ all_passed = False
+
+ assert all_passed
+
+ except Exception as exc:
+ print(f' [FAIL] Exception: {exc}')
+ raise
+
+
+def test_plugin_invocation_thoughts_stream_live_during_agent_execution():
+ """Verify plugin invocation callbacks can publish live SSE thought events."""
+ print('\n🔍 Testing live plugin invocation thought emission...')
+
+ try:
+ route_content = read_file_content(ROUTE_BACKEND_CHATS_FILE)
+ plugin_thoughts_content = read_file_content(PLUGIN_THOUGHTS_FILE)
+
+ checks = {
+ 'callback helper supports live callback': 'live_thought_callback=None' in plugin_thoughts_content,
+ 'callback publishes live payload': "live_payload['step_index'] = thought_tracker.current_index - 1" in plugin_thoughts_content,
+ 'stream route defines background publisher': 'def publish_background_event(event_text):' in route_content,
+ 'stream route defines live plugin publisher': 'def publish_live_plugin_thought(thought_payload):' in route_content,
+ 'agent callback registers live publisher': 'live_thought_callback=publish_live_plugin_thought' in route_content,
+ 'completion replay removed': 'thought_tracker.current_index += 1' not in route_content,
+ }
+
+ all_passed = True
+ for name, passed in checks.items():
+ status = 'PASS' if passed else 'FAIL'
+ print(f' [{status}] {name}')
+ if not passed:
+ all_passed = False
+
+ assert all_passed
+
+ except Exception as exc:
+ print(f' [FAIL] Exception: {exc}')
+ raise
if __name__ == '__main__':
tests = [
test_streaming_parser_buffers_split_sse_events,
test_late_thoughts_do_not_replace_streamed_content,
+ test_streaming_thoughts_are_scoped_to_the_active_message,
+ test_plugin_invocation_thoughts_stream_live_during_agent_execution,
]
- results = []
+ success = True
+ passed = 0
+ total = len(tests)
for test in tests:
print(f'\n🧪 Running {test.__name__}...')
- results.append(test())
-
- passed = sum(1 for result in results if result)
- total = len(results)
- success = passed == total
+ try:
+ test()
+ passed += 1
+ except Exception:
+ success = False
+ import traceback
+
+ traceback.print_exc()
print(f'\n📊 Results: {passed}/{total} tests passed')
sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_workspace_multi_endpoints.py b/functional_tests/test_workspace_multi_endpoints.py
new file mode 100644
index 00000000..df5a7f0a
--- /dev/null
+++ b/functional_tests/test_workspace_multi_endpoints.py
@@ -0,0 +1,155 @@
+# test_workspace_multi_endpoints.py
+"""
+Functional test for workspace multi-endpoint routing.
+Version: 0.239.155
+Implemented in: 0.239.155
+
+This test ensures that workspace multi-endpoint payloads are sanitized and that
+agent payloads accept multi-endpoint selection fields.
+"""
+
+import sys
+import os
+import importlib
+import json
+import types
+
+repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+single_app_root = os.path.join(repo_root, "application", "single_app")
+sys.path.append(repo_root)
+sys.path.append(single_app_root)
+
+from application.single_app.functions_agent_payload import sanitize_agent_payload
+
+
+def _restore_modules(original_modules):
+ for module_name, original_module in original_modules.items():
+ if original_module is None:
+ sys.modules.pop(module_name, None)
+ else:
+ sys.modules[module_name] = original_module
+
+
def _load_functions_settings_module():
    """Import application.single_app.functions_settings with its deps stubbed.

    Installs lightweight stand-ins for ``config``, ``functions_appinsights``
    and ``app_settings_cache`` in sys.modules so the real module imports
    without Cosmos DB or App Insights access.

    Returns:
        tuple: (imported module, dict of original sys.modules entries) — pass
        the dict to ``_restore_modules`` to undo the stubbing.
    """
    config_stub = types.ModuleType("config")
    # The real config module exposes json; mirror just that attribute.
    config_stub.json = json

    appinsights_stub = types.ModuleType("functions_appinsights")
    appinsights_stub.log_event = lambda *args, **kwargs: None

    cache_stub = types.ModuleType("app_settings_cache")
    cache_stub.get_settings_cache = lambda: None
    cache_stub.update_settings_cache = lambda settings: None

    original_modules = {}
    for module_name, module_stub in {
        "config": config_stub,
        "functions_appinsights": appinsights_stub,
        "app_settings_cache": cache_stub,
    }.items():
        # Remember what was there (possibly nothing) so it can be restored.
        original_modules[module_name] = sys.modules.get(module_name)
        sys.modules[module_name] = module_stub

    # Evict any cached import so the module re-binds against the stubs above.
    original_modules["application.single_app.functions_settings"] = sys.modules.get("application.single_app.functions_settings")
    sys.modules.pop("application.single_app.functions_settings", None)
    module = importlib.import_module("application.single_app.functions_settings")
    return module, original_modules
+
+
def test_model_endpoint_sanitization():
    """Ensure secrets are stripped and flags are preserved.

    sanitize_model_endpoints_for_frontend must remove api_key and
    client_secret values before endpoint payloads reach the browser, while
    setting the has_api_key / has_client_secret booleans so the UI can still
    indicate that a secret is configured.
    """
    functions_settings, original_modules = _load_functions_settings_module()
    endpoints = [
        {
            "id": "endpoint-1",
            "name": "Personal Endpoint",
            "provider": "aoai",
            "auth": {
                "type": "api_key",
                "api_key": "super-secret"
            },
            "connection": {
                "endpoint": "https://example.openai.azure.com",
                "openai_api_version": "2024-05-01-preview"
            },
            "models": [
                {"id": "model-1", "deploymentName": "gpt-4o", "enabled": True}
            ]
        }
    ]
    try:
        sanitized = functions_settings.sanitize_model_endpoints_for_frontend(endpoints)
        # The raw key must be gone, but its presence must still be signalled.
        assert sanitized[0]["auth"].get("api_key") is None
        assert sanitized[0]["has_api_key"] is True

        # Same contract for service-principal secrets on Foundry endpoints.
        service_principal_endpoints = [
            {
                "id": "endpoint-2",
                "name": "Foundry Endpoint",
                "provider": "aifoundry",
                "auth": {
                    "type": "service_principal",
                    "client_secret": "client-secret-value"
                },
                "connection": {
                    "endpoint": "https://foundry.example.azure.com",
                    "openai_api_version": "2024-05-01-preview"
                },
                "models": []
            }
        ]
        sanitized_sp = functions_settings.sanitize_model_endpoints_for_frontend(service_principal_endpoints)
        assert sanitized_sp[0]["auth"].get("client_secret") is None
        assert sanitized_sp[0]["has_client_secret"] is True
    finally:
        # Always undo the sys.modules stubbing, even when assertions fail.
        _restore_modules(original_modules)
+
+
def test_agent_payload_multi_endpoint_fields():
    """Ensure agent payload accepts multi-endpoint selection fields.

    Sanitization must pass model_endpoint_id / model_id / model_provider
    through unchanged so the runtime can route the agent to the selected
    endpoint and model.
    """
    agent_payload = {
        "id": "123e4567-e89b-12d3-a456-426614174000",
        "name": "workspace_agent",
        "display_name": "Workspace Agent",
        "description": "Test agent",
        "instructions": "Be helpful",
        "actions_to_load": [],
        "other_settings": {},
        "max_completion_tokens": 256,
        "agent_type": "local",
        "is_global": False,
        "is_group": False,
        "model_endpoint_id": "endpoint-1",
        "model_id": "model-1",
        "model_provider": "aoai",
        "azure_openai_gpt_deployment": "gpt-4o"
    }
    sanitized = sanitize_agent_payload(agent_payload)
    # The routing fields introduced for multi-endpoint support must survive.
    assert sanitized.get("model_endpoint_id") == "endpoint-1"
    assert sanitized.get("model_id") == "model-1"
    assert sanitized.get("model_provider") == "aoai"
+
+
def run_tests():
    """Run every functional test in this module, printing a summary.

    Returns:
        bool: True when all tests passed, False otherwise.
    """
    tests = [test_model_endpoint_sanitization, test_agent_payload_multi_endpoint_fields]
    outcomes = []

    for test_func in tests:
        print(f"\n🧪 Running {test_func.__name__}...")
        try:
            test_func()
        except Exception as exc:
            # Report and keep going so every test gets a chance to run.
            print(f"❌ Test failed: {exc}")
            import traceback
            traceback.print_exc()
            outcomes.append(False)
        else:
            print("✅ Test passed")
            outcomes.append(True)

    print(f"\n📊 Results: {sum(outcomes)}/{len(outcomes)} tests passed")
    return all(outcomes)
+
+
if __name__ == "__main__":
    # Propagate a non-zero exit code on failure so CI can gate on this script.
    sys.exit(0 if run_tests() else 1)
diff --git a/msgraph_test_output.txt b/msgraph_test_output.txt
new file mode 100644
index 00000000..24e1c1a5
--- /dev/null
+++ b/msgraph_test_output.txt
@@ -0,0 +1,66 @@
+C:\Repos\simplechatmsft\.venv\Lib\site-packages\requests\__init__.py:113: RequestsDependencyWarning: urllib3 (2.6.3) or chardet (7.3.0)/charset_normalizer (3.4.6) doesn't match a supported version!
+ warnings.warn(
+[Plugin Function Logger] Function completed successfully
+Plugin function executed successfully
+[Plugin Invocation] MSGraphPlugin.get_my_messages
+[Plugin Function Logger] Function completed successfully
+Plugin function executed successfully
+[Plugin Invocation] MSGraphPlugin.get_my_timezone
+[Plugin Function Logger] Function completed successfully
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_my_profile', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_my_timezone', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_my_events', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_my_messages', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'search_users', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_user_by_email', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'list_drive_items', 'plugin_name': 'MSGraphPlugin'}
+[Log] [Plugin Function Logger] Decorating function for plugin -- {'function_name': 'get_my_security_alerts', 'plugin_name': 'MSGraphPlugin'}
+
+Running test_msgraph_plugin_exposes_expected_operations...
+Testing Microsoft Graph plugin metadata and function registration...
+Microsoft Graph plugin metadata includes the expected operations
+
+Running test_msgraph_plugin_paginates_and_truncates_list_results...
+Testing Microsoft Graph helper pagination behavior...
+[Log] [Plugin Function Logger] Function call started -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_messages'}
+[Log] [Plugin Function Logger] Function parameters -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_messages', 'parameters': {'top': 3}, 'param_string': 'top=3'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [MSGraphPlugin] get_my_messages requesting https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messages params={'$top': 3, '$select': 'id,subject,from,receivedDateTime,isRead,importance,webLink', '$orderby': 'receivedDateTime desc'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [MSGraphPlugin] get_my_messages requesting https://graph.microsoft.com/v1.0/me/messages?page=2 params=None
+[Log] [Plugin Function Logger] Function completed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_messages', 'result_preview': "{'operation': 'get_my_messages', 'count': 3, 'value': [{'id': '1'}, {'id': '2'}, {'id': '3'}], 'next_link': None, 'truncated': True}", 'duration_ms': 190.74654579162598, 'full_function_name': 'MSGraphPlugin.get_my_messages'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [Plugin SUCCESS] MSGraphPlugin.get_my_messages (190.7ms)
+[Log] Plugin function executed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_messages', 'duration_ms': 190.74654579162598, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:20.809761', 'parameter_count': 1, 'parameters': {'top': '3'}, 'result_preview': "{'operation': 'get_my_messages', 'count': 3, 'value': [{'id': '1'}, {'id': '2'}, {'id': '3'}], 'next_link': None, 'truncated': True}", 'result_type': 'dict'}
+[Log] [Plugin Invocation] MSGraphPlugin.get_my_messages -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_messages', 'duration_ms': 190.74654579162598, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:20.809761', 'parameter_count': 1, 'result_type': 'dict', 'error_message': None, 'parameters': {'top': '3'}, 'result_preview': "{'operation': 'get_my_messages', 'count': 3, 'value': [{'id': '1'}, {'id': '2'}, {'id': '3'}], 'next_link': None, 'truncated': True}"}
+Microsoft Graph helper paginates and truncates list results safely
+
+Running test_msgraph_plugin_reads_mailbox_timezone...
+Testing Microsoft Graph mailbox timezone lookup...
+[Log] [Plugin Function Logger] Function call started -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_timezone'}
+[Log] [Plugin Function Logger] Function parameters -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_timezone', 'parameters': {}, 'param_string': 'no parameters'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [MSGraphPlugin] get_my_timezone requesting https://graph.microsoft.com/v1.0/me/mailboxSettings params={}
+[Log] [Plugin Function Logger] Function completed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_timezone', 'result_preview': "{'operation': 'get_my_timezone', 'time_zone': 'Pacific Standard Time', 'date_format': 'M/d/yyyy', 'time_format': 'h:mm tt', 'language': {'locale': 'en-US', 'displayName': 'English (United States)'}, '...", 'duration_ms': 95.86191177368164, 'full_function_name': 'MSGraphPlugin.get_my_timezone'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [Plugin SUCCESS] MSGraphPlugin.get_my_timezone (95.9ms)
+[Log] Plugin function executed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_timezone', 'duration_ms': 95.86191177368164, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:21.010018', 'result_preview': "{'operation': 'get_my_timezone', 'time_zone': 'Pacific Standard Time', 'date_format': 'M/d/yyyy', 'time_format': 'h:mm tt', 'language': {'locale': 'en-US', 'displayName': 'English (United States)'}, '...", 'result_type': 'dict'}
+[Log] [Plugin Invocation] MSGraphPlugin.get_my_timezone -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'get_my_timezone', 'duration_ms': 95.86191177368164, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:21.010018', 'parameter_count': 0, 'result_type': 'dict', 'error_message': None, 'result_preview': '{\'operation\': \'get_my_timezone\', \'time_zone\': \'Pacific Standard Time\', \'date_format\': \'M/d/yyyy\', \'time_format\': \'h:mm tt\', \'language\': {\'locale\': \'en-US\', \'displayName\': \'English (United States)\'}, \'working_hours_time_zone\': {\'name\': \'Pacific Standard Time\'}, \'message\': "Use the user\'s mailbox time_zone instead of assuming UTC when answering user-facing date and time questions."}'}
+Microsoft Graph plugin reads mailbox timezone settings safely
+
+Running test_msgraph_plugin_surfaces_token_consent_errors...
+Testing Microsoft Graph token consent error handling...
+[Log] [Plugin Function Logger] Function call started -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'search_users'}
+[Log] [Plugin Function Logger] Function parameters -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'search_users', 'parameters': {'arg_0': 'Ada'}, 'param_string': 'arg_0=Ada'}
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [MSGraphPlugin] search_users token acquisition failed: {'error': 'consent_required', 'message': 'Consent is required.', 'consent_url': 'https://example.test/consent', 'scopes': ['User.ReadBasic.All'], 'operation': 'search_users'}
+[Log] [Plugin Function Logger] Function completed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'search_users', 'result_preview': "{'error': 'consent_required', 'message': 'Consent is required.', 'consent_url': 'https://example.test/consent', 'scopes': ['User.ReadBasic.All'], 'operation': 'search_users'}", 'duration_ms': 95.50833702087402, 'full_function_name': 'MSGraphPlugin.search_users'}
+Plugin function executed successfully
+[Plugin Invocation] MSGraphPlugin.search_users
+Warning: Failed to get settings from cache, read from Cosmos DB instead. Called from C:\Repos\simplechatmsft\application\single_app\functions_debug.py:46 in debug_print().
+[DEBUG] [INFO]: [Plugin SUCCESS] MSGraphPlugin.search_users (95.5ms)
+[Log] Plugin function executed successfully -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'search_users', 'duration_ms': 95.50833702087402, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:21.202137', 'parameter_count': 1, 'parameters': {'arg_0': 'Ada'}, 'result_preview': "{'error': 'consent_required', 'message': 'Consent is required.', 'consent_url': 'https://example.test/consent', 'scopes': ['User.ReadBasic.All'], 'operation': 'search_users'}", 'result_type': 'dict'}
+[Log] [Plugin Invocation] MSGraphPlugin.search_users -- {'plugin_name': 'MSGraphPlugin', 'function_name': 'search_users', 'duration_ms': 95.50833702087402, 'success': True, 'user_id': None, 'timestamp': '2026-03-26T03:02:21.202137', 'parameter_count': 1, 'result_type': 'dict', 'error_message': None, 'parameters': {'arg_0': 'Ada'}, 'result_preview': "{'error': 'consent_required', 'message': 'Consent is required.', 'consent_url': 'https://example.test/consent', 'scopes': ['User.ReadBasic.All'], 'operation': 'search_users'}"}
+Microsoft Graph plugin surfaces consent requirements safely
+
+Results: 4/4 tests passed
diff --git a/scripts/Register-ResourceProviders.ps1 b/scripts/Register-ResourceProviders.ps1
new file mode 100644
index 00000000..0664c4f3
--- /dev/null
+++ b/scripts/Register-ResourceProviders.ps1
@@ -0,0 +1,186 @@
+<#
+.SYNOPSIS
+Registers Azure resource providers required to deploy Simple Chat.
+
+.DESCRIPTION
+Scans a predefined list of provider namespaces derived from the deployer Bicep templates
+and registers any that are not already registered. Optionally waits for registration
+completion and supports retries with exponential backoff.
+
+.PARAMETER SubscriptionId
+Optional subscription ID to target. If provided, the script will set the active subscription.
+
+.PARAMETER Providers
+Optional list of provider namespaces to register. Defaults to the providers required by Simple Chat.
+
+.PARAMETER Wait
+If specified, waits for each provider to reach the "Registered" state.
+
+.PARAMETER MaxWaitMinutes
+Maximum time to wait for a provider registration to complete.
+
+.PARAMETER MaxRetries
+Maximum number of retries for transient registration failures.
+
+.PARAMETER RetryDelaySeconds
+Initial delay (in seconds) between retries. Uses exponential backoff.
+
+.EXAMPLE
+# Register all required providers in the current subscription
+.\Register-ResourceProviders.ps1 -Wait
+
+.EXAMPLE
+# Register providers in a specific subscription
+.\Register-ResourceProviders.ps1 -SubscriptionId "00000000-0000-0000-0000-000000000000" -Wait
+#>
+
[CmdletBinding()]
param(
    # Optional subscription to target; when omitted the current az context is used.
    [Parameter(Mandatory = $false)]
    [string] $SubscriptionId,

    # Provider namespaces required by the Simple Chat deployer Bicep templates.
    [Parameter(Mandatory = $false)]
    [string[]] $Providers = @(
        'Microsoft.Authorization',
        'Microsoft.Cache',
        'Microsoft.CognitiveServices',
        'Microsoft.ContainerRegistry',
        'Microsoft.DocumentDB',
        'Microsoft.Insights',
        'Microsoft.KeyVault',
        'Microsoft.ManagedIdentity',
        'Microsoft.Network',
        'Microsoft.OperationalInsights',
        'Microsoft.Resources',
        'Microsoft.Search',
        'Microsoft.Storage',
        'Microsoft.VideoIndexer',
        'Microsoft.Web'
    ),

    # When set, block until each provider reports the 'Registered' state.
    [Parameter(Mandatory = $false)]
    [switch] $Wait,

    # Per-provider deadline (minutes) used by Wait-ForProvider.
    [Parameter(Mandatory = $false)]
    [int] $MaxWaitMinutes = 20,

    # Retry attempts for transient registration failures.
    [Parameter(Mandatory = $false)]
    [int] $MaxRetries = 3,

    # Initial backoff between retries; doubles per attempt (capped at 60s).
    [Parameter(Mandatory = $false)]
    [int] $RetryDelaySeconds = 5
)

# Fail the script on any PowerShell error so partial progress is visible.
$ErrorActionPreference = 'Stop'
+
function Write-Log {
    <#
    .SYNOPSIS
    Writes a timestamped, level-tagged line to the console,
    e.g. "[2024-01-01 00:00:00Z][INFO] message".
    #>
    param(
        [Parameter(Mandatory = $true)]
        [string] $Message,

        [Parameter(Mandatory = $false)]
        [ValidateSet('INFO', 'WARN', 'ERROR')]
        [string] $Level = 'INFO'
    )

    $stamp = (Get-Date).ToString('u')
    Write-Host ('[{0}][{1}] {2}' -f $stamp, $Level, $Message)
}
+
function Ensure-AzCli {
    # Fail fast when the Azure CLI binary is not available on PATH.
    $azCommand = Get-Command az -ErrorAction SilentlyContinue
    if ($null -eq $azCommand) {
        throw 'Azure CLI (az) is required but was not found in PATH. Install Azure CLI and run "az login".'
    }
}
+
function Get-ProviderState {
    <#
    .SYNOPSIS
    Returns the registrationState of a provider namespace via the Azure CLI,
    normalizing an empty response to 'NotRegistered'.
    #>
    param(
        [Parameter(Mandatory = $true)]
        [string] $Namespace
    )

    $registrationState = az provider show --namespace $Namespace --query "registrationState" -o tsv
    if ($registrationState) {
        return $registrationState
    }

    # az emits nothing for unknown/unregistered providers.
    return 'NotRegistered'
}
+
function Register-ProviderWithRetry {
    <#
    .SYNOPSIS
    Starts registration for a provider namespace, retrying transient failures
    with exponential backoff (delay doubles per attempt, capped at 60 seconds).

    .PARAMETER Namespace
    Provider namespace to register, e.g. 'Microsoft.Storage'.
    #>
    param(
        [Parameter(Mandatory = $true)]
        [string] $Namespace
    )

    $attempt = 0
    $delay = $RetryDelaySeconds

    while ($attempt -lt $MaxRetries) {
        try {
            $attempt++
            Write-Log "Registering provider: $Namespace (attempt $attempt/$MaxRetries)"
            az provider register --namespace $Namespace | Out-Null

            # Native commands do not throw on a non-zero exit code, so without
            # this check the catch block below would never engage on az failures.
            if ($LASTEXITCODE -ne 0) {
                throw "az provider register exited with code $LASTEXITCODE for $Namespace."
            }

            return
        }
        catch {
            if ($attempt -ge $MaxRetries) {
                throw
            }

            Write-Log "Retrying provider registration for $Namespace in $delay seconds..." 'WARN'
            Start-Sleep -Seconds $delay
            # Exponential backoff, capped at one minute between attempts.
            $delay = [Math]::Min($delay * 2, 60)
        }
    }
}
+
function Wait-ForProvider {
    <#
    .SYNOPSIS
    Polls until the provider namespace reaches 'Registered' or the
    MaxWaitMinutes deadline elapses; throws on timeout.
    #>
    param(
        [Parameter(Mandatory = $true)]
        [string] $Namespace
    )

    $deadline = (Get-Date).AddMinutes($MaxWaitMinutes)
    while ((Get-Date) -lt $deadline) {
        $state = Get-ProviderState -Namespace $Namespace
        if ($state -eq 'Registered') {
            Write-Log "Provider registered: $Namespace"
            return
        }

        # Registration completes asynchronously on Azure's side; poll every 10s.
        Write-Log "Waiting for provider $Namespace to register (current: $state)..." 'INFO'
        Start-Sleep -Seconds 10
    }

    throw "Timed out waiting for provider $Namespace to register."
}
+
# Main: validate prerequisites, then register each missing provider.
try {
    Ensure-AzCli

    if ($SubscriptionId) {
        Write-Log "Setting subscription to $SubscriptionId"
        az account set --subscription $SubscriptionId | Out-Null
    }

    foreach ($provider in $Providers) {
        # Skip already-registered providers so repeated runs are idempotent.
        $state = Get-ProviderState -Namespace $provider
        if ($state -eq 'Registered') {
            Write-Log "Already registered: $provider"
            continue
        }

        Register-ProviderWithRetry -Namespace $provider

        # Registration is asynchronous; only block when -Wait was requested.
        if ($Wait) {
            Wait-ForProvider -Namespace $provider
        }
    }

    Write-Log 'Provider registration complete.'
}
catch {
    # Log with a timestamp before propagating the failure to the caller.
    Write-Log $_.Exception.Message 'ERROR'
    throw
}
diff --git a/scripts/resolve_multiendpoint_gpt.py b/scripts/resolve_multiendpoint_gpt.py
new file mode 100644
index 00000000..d6f7a6b6
--- /dev/null
+++ b/scripts/resolve_multiendpoint_gpt.py
@@ -0,0 +1,416 @@
+# resolve_multiendpoint_gpt.py
+"""
+Emulate the multi-endpoint GPT resolver and send a test chat completion.
+
+This script pulls settings from Cosmos DB (simplechat/settings), resolves the selected
+model endpoint, then sends a GPT-only chat request with detailed logging.
+"""
+
+import argparse
+import json
+import logging
+import os
+from urllib.parse import urlparse
+
+from azure.cosmos import CosmosClient
+from azure.cosmos.exceptions import CosmosResourceNotFoundError
+from azure.identity import ClientSecretCredential, DefaultAzureCredential, get_bearer_token_provider
+from dotenv import load_dotenv
+from openai import AzureOpenAI, OpenAI
+
+
+def configure_logging(verbose):
+ logging.basicConfig(
+ level=logging.DEBUG if verbose else logging.INFO,
+ format="[%(levelname)s] %(message)s"
+ )
+
+
def get_env_value(name):
    """Read an environment variable, returning a stripped string ('' when unset)."""
    return os.getenv(name, "").strip()
+
+
def resolve_authority(auth_settings):
    """Map the configured management cloud to an AAD authority URL.

    Returns the US Government authority for 'government', the trimmed custom
    authority for 'custom' (or None when unset), and None for public clouds
    (letting the credential library use its default authority).
    """
    cloud = (auth_settings.get("management_cloud") or "public").lower()
    if cloud == "government":
        return "https://login.microsoftonline.us"
    if cloud == "custom":
        authority = (auth_settings.get("custom_authority") or "").strip()
        return authority or None
    return None
+
+
def infer_foundry_scope_from_endpoint(endpoint):
    """Infer the AAD token scope for an Azure AI Foundry endpoint from its host.

    Sovereign-cloud hosts (azure.us / azure.cn / azure.de) map to their
    cloud-specific scopes; everything else (including a missing endpoint)
    falls back to the public-cloud scope.
    """
    if not endpoint:
        return "https://ai.azure.com/.default"
    host = (urlparse(endpoint).hostname or endpoint).lower()
    for marker, scope in (
        ("azure.us", "https://ai.azure.us/.default"),
        ("azure.cn", "https://ai.azure.cn/.default"),
        ("azure.de", "https://ai.azure.de/.default"),
    ):
        if marker in host:
            return scope
    return "https://ai.azure.com/.default"
+
+
def resolve_foundry_scope_for_auth(auth_settings, endpoint=None):
    """Resolve the AAD scope used to acquire Foundry tokens for this auth config.

    Service principals derive the scope from the configured management cloud
    (custom clouds must set 'foundry_scope' explicitly); other auth types use
    an explicit 'foundry_scope' override, or infer the scope from the
    endpoint host.

    Raises:
        ValueError: for a custom-cloud service principal with no scope set.
    """
    auth_type = (auth_settings.get("type") or "managed_identity").lower()
    explicit_scope = (auth_settings.get("foundry_scope") or "").strip()

    if auth_type == "service_principal":
        cloud = (auth_settings.get("management_cloud") or "public").lower()
        if cloud == "government":
            return "https://ai.azure.us/.default"
        if cloud == "custom":
            if not explicit_scope:
                raise ValueError("Foundry scope is required for custom cloud configurations.")
            return explicit_scope
        return "https://ai.azure.com/.default"

    return explicit_scope or infer_foundry_scope_from_endpoint(endpoint)
+
+
def resolve_cognitive_services_scope():
    """Pick the Cognitive Services AAD token scope for the configured cloud.

    Reads AZURE_ENVIRONMENT from the environment ('public' when unset); a
    'custom' cloud must supply its scope via CUSTOM_COGNITIVE_SERVICES_URL_VALUE.

    Raises:
        ValueError: when a custom cloud is configured without a custom scope.
    """
    azure_env = (get_env_value("AZURE_ENVIRONMENT") or "public").lower()
    if azure_env == "usgovernment":
        return "https://cognitiveservices.azure.us/.default"
    if azure_env == "custom":
        custom_scope = get_env_value("CUSTOM_COGNITIVE_SERVICES_URL_VALUE")
        if not custom_scope:
            raise ValueError("CUSTOM_COGNITIVE_SERVICES_URL_VALUE is required for custom cloud.")
        return custom_scope
    return "https://cognitiveservices.azure.com/.default"
+
+
def normalize_foundry_base_url(endpoint):
    """Convert a Foundry endpoint URL into its OpenAI-compatible '/models/' base URL.

    Existing '/openai/v1' or '/openai' path segments are rewritten to
    '/models'; otherwise '/models/' is appended. The result always ends
    with a trailing slash.

    Raises:
        ValueError: when endpoint is empty or None.
    """
    if not endpoint:
        raise ValueError("Foundry endpoint is required.")
    trimmed = endpoint.rstrip("/")
    if "/models" in trimmed:
        return f"{trimmed}/"
    # Check the longer '/openai/v1' form before the bare '/openai' form.
    for segment in ("/openai/v1", "/openai"):
        if segment in trimmed:
            return f"{trimmed.replace(segment, '/models')}/"
    return f"{trimmed}/models/"
+
+
def get_foundry_api_version_candidates(primary_version, settings):
    """Return an ordered, de-duplicated list of Foundry api-versions to try.

    Order of preference: the primary version, the global GPT fallback from
    settings, then known-good preview/GA versions. Falsy entries are dropped.
    """
    fallback = (settings.get("azure_openai_gpt_api_version") or "").strip()
    ordered = [
        primary_version,
        fallback,
        "2024-10-01-preview",
        "2024-07-01-preview",
        "2024-05-01-preview",
        "2024-02-01",
    ]
    # dict.fromkeys preserves first-seen order while dropping duplicates.
    return list(dict.fromkeys(version for version in ordered if version))
+
+
def resolve_foundry_inference_api_version(connection, settings):
    """Choose the api-version for Foundry inference calls.

    Prefers a concrete version from the endpoint connection (ignoring the
    placeholder 'v1'); otherwise falls back to the global GPT api-version,
    and finally to a known-good preview version.
    """
    configured = (connection.get("openai_api_version") or connection.get("api_version") or "").strip()
    if configured and configured != "v1":
        return configured
    return settings.get("azure_openai_gpt_api_version") or "2024-05-01-preview"
+
+
def mask_secret(value, visible=4):
    """Mask a secret for logging, keeping at most `visible` chars at each end.

    Secrets of length <= 2*visible are fully masked: otherwise the head and
    tail slices would overlap and echo the entire secret back (e.g. a 5-char
    secret with visible=4 previously leaked all of its characters).

    Args:
        value: the secret string (may be None/empty).
        visible: number of characters to keep at each end of long secrets.

    Returns:
        str: '' for falsy input, all-asterisks for short secrets, otherwise
        'head***tail'.
    """
    if not value:
        return ""
    if len(value) <= visible * 2:
        return "*" * len(value)
    return f"{value[:visible]}***{value[-visible:]}"
+
+
def log_available_databases(client):
    """Log the ids of all databases visible on the Cosmos client (best effort).

    Used as a diagnostic after a database lookup fails; listing errors are
    logged as warnings rather than raised.
    """
    try:
        databases = list(client.list_databases())
    except Exception as exc:
        logging.warning("Failed to list databases: %s", exc)
        return
    names = [entry.get("id") for entry in databases if isinstance(entry, dict)]
    logging.info("Available databases: %s", ", ".join(names) if names else "")
+
+
def log_available_containers(database):
    """Log the ids of all containers in the Cosmos database (best effort).

    Used as a diagnostic after a container lookup fails; listing errors are
    logged as warnings rather than raised.
    """
    try:
        containers = list(database.list_containers())
    except Exception as exc:
        logging.warning("Failed to list containers: %s", exc)
        return
    # Skip non-dict entries defensively; only dicts carry an 'id'.
    ids = [c.get("id") for c in containers if isinstance(c, dict)]
    logging.info("Available containers: %s", ", ".join(ids) if ids else "")
+
+
def fetch_settings_from_cosmos(database_name, container_name, settings_id):
    """Load the application settings document from Cosmos DB.

    Connection details come from AZURE_COSMOS_ENDPOINT / AZURE_COSMOS_KEY in
    the environment (.env). Falls back from a point-read to an id query, then
    to the first document in the container, logging what exists on failures.

    Args:
        database_name: Cosmos database id (e.g. "simplechat").
        container_name: Container id holding the settings document.
        settings_id: Document id (also used as the partition key for the read).

    Returns:
        dict: the settings document.

    Raises:
        ValueError: when env vars are missing, or the database/container/
            documents cannot be found.
    """
    cosmos_endpoint = get_env_value("AZURE_COSMOS_ENDPOINT")
    cosmos_key = get_env_value("AZURE_COSMOS_KEY")
    if not cosmos_endpoint or not cosmos_key:
        raise ValueError("AZURE_COSMOS_ENDPOINT and AZURE_COSMOS_KEY must be set in the .env file.")

    # Only log the hostname, never the full endpoint or the key.
    logging.info("Connecting to Cosmos DB endpoint: %s", urlparse(cosmos_endpoint).hostname)
    client = CosmosClient(cosmos_endpoint, credential=cosmos_key)
    logging.info("Using Cosmos database=%s container=%s settings_id=%s", database_name, container_name, settings_id)
    database = client.get_database_client(database_name)
    try:
        database.read()
    except CosmosResourceNotFoundError:
        logging.error("Cosmos database '%s' not found in this account.", database_name)
        log_available_databases(client)
        raise ValueError("Cosmos database not found. Check AZURE_COSMOS_ENDPOINT and --database.")

    container = database.get_container_client(container_name)
    try:
        container.read()
    except CosmosResourceNotFoundError:
        logging.error("Cosmos container '%s' not found in database '%s'.", container_name, database_name)
        log_available_containers(database)
        raise ValueError("Cosmos container not found. Check --container.")

    try:
        # Point read first: assumes the document is partitioned by its own id.
        settings = container.read_item(item=settings_id, partition_key=settings_id)
    except CosmosResourceNotFoundError:
        logging.warning("Settings item not found by id. Attempting query lookup for id=%s", settings_id)
        query = "SELECT * FROM c WHERE c.id = @id"
        try:
            items = list(container.query_items(query=query, parameters=[{"name": "@id", "value": settings_id}], enable_cross_partition_query=True))
        except CosmosResourceNotFoundError:
            logging.error("Container query failed. Verify database/container and Cosmos account endpoint.")
            log_available_containers(database)
            raise
        if items:
            settings = items[0]
        else:
            # Last resort: a settings container typically holds a single document.
            logging.warning("No matching settings id found. Falling back to first item in container.")
            items = list(container.query_items(query="SELECT TOP 1 * FROM c", enable_cross_partition_query=True))
            if not items:
                raise ValueError("No settings documents found in the container.")
            settings = items[0]
    logging.info("Settings loaded: enable_multi_model_endpoints=%s", settings.get("enable_multi_model_endpoints"))
    return settings
+
+
def build_multi_endpoint_client(auth, provider, endpoint, api_version, settings):
    """Build an OpenAI-compatible client for the resolved endpoint configuration.

    Providers:
      * "aifoundry" — plain OpenAI client pointed at the '/models/' base URL,
        with 'api-version' supplied as a default query parameter.
      * anything else — AzureOpenAI against the Azure OpenAI endpoint.

    Auth types: 'api_key', 'service_principal', or 'managed_identity' (default).

    Raises:
        ValueError: when an api_key auth type has no key configured.
    """
    auth_type = (auth.get("type") or "managed_identity").lower()
    if provider == "aifoundry":
        base_url = normalize_foundry_base_url(endpoint)
        # Re-resolve so the placeholder 'v1' is replaced by a concrete version.
        api_version = resolve_foundry_inference_api_version({"api_version": api_version}, settings)
        default_query = {"api-version": api_version}
        if auth_type == "api_key":
            api_key = auth.get("api_key")
            if not api_key:
                raise ValueError("API key is required for the selected endpoint.")
            logging.info("Using Foundry OpenAI-compatible endpoint: %s", base_url)
            logging.info("Using Foundry api-version: %s", api_version)
            return OpenAI(base_url=base_url, api_key=api_key, default_query=default_query)

        authority_override = None
        if auth_type == "service_principal":
            authority_override = resolve_authority(auth)
            credential = ClientSecretCredential(
                tenant_id=auth.get("tenant_id"),
                client_id=auth.get("client_id"),
                client_secret=auth.get("client_secret"),
                authority=authority_override
            )
        else:
            # Managed identity (or default credential chain when run locally).
            managed_identity_client_id = auth.get("managed_identity_client_id") or None
            credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id)

        scope = resolve_foundry_scope_for_auth(auth, endpoint)
        logging.info("Using Foundry AAD scope: %s", scope)
        # NOTE(review): a one-shot token is used as the api key; it will not be
        # refreshed after expiry — acceptable for this test script, confirm
        # before reusing in long-lived code.
        token = credential.get_token(scope).token
        logging.info("Using Foundry OpenAI-compatible endpoint: %s", base_url)
        logging.info("Using Foundry api-version: %s", api_version)
        return OpenAI(base_url=base_url, api_key=token, default_query=default_query)

    if auth_type == "api_key":
        api_key = auth.get("api_key")
        if not api_key:
            raise ValueError("API key is required for the selected endpoint.")
        return AzureOpenAI(
            # NOTE(review): api_version is deliberately commented out here —
            # confirm the installed openai SDK defaults it safely.
            #api_version=api_version,
            azure_endpoint=endpoint,
            api_key=api_key
        )

    if auth_type == "service_principal":
        authority_override = resolve_authority(auth)
        credential = ClientSecretCredential(
            tenant_id=auth.get("tenant_id"),
            client_id=auth.get("client_id"),
            client_secret=auth.get("client_secret"),
            authority=authority_override
        )
        scope = resolve_cognitive_services_scope()
        logging.info("Using service principal scope: %s", scope)
        token_provider = get_bearer_token_provider(credential, scope)
    else:
        managed_identity_client_id = auth.get("managed_identity_client_id") or None
        credential = DefaultAzureCredential(managed_identity_client_id=managed_identity_client_id)
        scope = resolve_cognitive_services_scope()
        logging.info("Using managed identity scope: %s", scope)
        token_provider = get_bearer_token_provider(credential, scope)

    return AzureOpenAI(
        #api_version=api_version,
        azure_endpoint=endpoint,
        azure_ad_token_provider=token_provider
    )
+
+
def resolve_multi_endpoint_gpt_config(settings, model_id, endpoint_id, provider_override=None):
    """Resolve the GPT client/deployment for a multi-endpoint model selection.

    Mirrors the application's resolver: the multi-endpoint feature flag must
    be on, APIM must be off, and both the endpoint and the model must exist
    and be enabled.

    Args:
        settings: Settings document loaded from Cosmos DB.
        model_id: Id of the model inside the endpoint's 'models' list.
        endpoint_id: Id of the entry in settings['model_endpoints'].
        provider_override: Optional provider name that must match the
            endpoint's provider when both are set.

    Returns:
        tuple: (gpt_client, deployment_name, provider, endpoint_url, auth,
        api_version).

    Raises:
        ValueError: when resolution is inactive or any piece is missing or
            disabled.
    """
    enable_multi_model_endpoints = settings.get("enable_multi_model_endpoints", False)
    enable_gpt_apim = settings.get("enable_gpt_apim", False)

    if not enable_multi_model_endpoints or enable_gpt_apim or not model_id:
        raise ValueError("Multi-endpoint resolution is not active or model_id is missing.")

    endpoints = settings.get("model_endpoints", []) or []
    endpoint_cfg = next((e for e in endpoints if e.get("id") == endpoint_id), None)
    # Endpoints/models default to enabled when the flag is absent.
    if not endpoint_cfg or not endpoint_cfg.get("enabled", True):
        raise ValueError("Selected model endpoint is not available.")

    models = endpoint_cfg.get("models", []) or []
    model_cfg = next((m for m in models if m.get("id") == model_id), None)
    if not model_cfg or not model_cfg.get("enabled", True):
        raise ValueError("Selected model is not available.")

    if provider_override and endpoint_cfg.get("provider") and provider_override != endpoint_cfg.get("provider"):
        raise ValueError("Selected model provider mismatch.")

    # Either key may carry the deployment name depending on config vintage.
    gpt_model = model_cfg.get("deploymentName") or model_cfg.get("deployment") or ""
    if not gpt_model:
        raise ValueError("Selected model is missing deployment name.")

    connection = endpoint_cfg.get("connection", {}) or {}
    auth = endpoint_cfg.get("auth", {}) or {}
    auth_type = (auth.get("type") or "managed_identity").lower()
    api_version = connection.get("openai_api_version") or connection.get("api_version")
    endpoint = connection.get("endpoint")
    provider = (endpoint_cfg.get("provider") or "aoai").lower()
    if provider == "aifoundry":
        # Foundry rejects the placeholder 'v1'; resolve a concrete api-version.
        api_version = resolve_foundry_inference_api_version(connection, settings)
        logging.info("Using Foundry inference api_version=%s", api_version)

    logging.info("Resolved endpoint: %s", endpoint_cfg.get("name"))
    logging.info("Resolved provider: %s", provider)
    logging.info("Resolved deployment: %s", gpt_model)
    logging.info("Resolved auth_type: %s", auth_type)

    gpt_client = build_multi_endpoint_client(auth, provider, endpoint, api_version, settings)
    return gpt_client, gpt_model, provider, endpoint, auth, api_version
+
+
+def run_chat_completion(gpt_client, gpt_model, message, reasoning_effort=None, provider=None, endpoint=None, auth=None, api_version=None, settings=None):
+ logging.info("Sending GPT-only request (model=%s)", gpt_model)
+
+ api_params = {
+ "model": gpt_model.lower(),
+ "messages": [
+ {"role": "system", "content": "You are an AI assistant that helps people find information."},
+ {"role": "user", "content": message}
+ ]
+ }
+ if reasoning_effort and reasoning_effort.lower() != "none":
+ api_params["reasoning_effort"] = reasoning_effort
+ logging.info("Using reasoning_effort=%s", reasoning_effort)
+
+ try:
+ print(f"\nAPI Params: {api_params}\n")
+ response = gpt_client.chat.completions.create(**api_params)
+ except Exception as exc:
+ error_str = str(exc).lower()
+ if provider == "aifoundry" and "api version not supported" in error_str:
+ logging.warning("Foundry API version not supported. Retrying with fallback versions...")
+ api_params.pop("reasoning_effort", None)
+ last_error = exc
+ for candidate in get_foundry_api_version_candidates(api_version, settings or {}):
+ if candidate == api_version:
+ continue
+ try:
+ logging.info("Retrying Foundry with api_version=%s", candidate)
+ retry_client = build_multi_endpoint_client(auth or {}, "aifoundry", endpoint, candidate, settings or {})
+ response = retry_client.chat.completions.create(**api_params)
+ break
+ except Exception as retry_exc:
+ last_error = retry_exc
+ logging.debug("Foundry retry failed for api_version=%s: %s", candidate, retry_exc)
+ else:
+ raise last_error
+ else:
+ raise
+ choice = response.choices[0]
+ content = choice.message.content if choice.message else ""
+ logging.info("Response received. Finish reason=%s", getattr(choice, "finish_reason", None))
+ logging.debug("Full response: %s", json.dumps(response.model_dump(), indent=2, default=str))
+ return content
+
+
+def parse_args():  # CLI flags for the multi-endpoint smoke-test script
+    parser = argparse.ArgumentParser(description="Resolve multi-endpoint GPT config and send a test message.")
+    parser.add_argument("--env-path", required=True, help="Path to the .env file containing Cosmos settings.")
+    parser.add_argument("--endpoint-id", required=True, help="Model endpoint ID from settings.")
+    parser.add_argument("--model-id", required=True, help="Model ID from settings.")
+    parser.add_argument("--database", default="SimpleChat", help="Cosmos database name.")
+    parser.add_argument("--container", default="settings", help="Cosmos settings container name.")
+    parser.add_argument("--settings-id", default="app_settings", help="Settings document id.")
+    parser.add_argument("--provider", default=None, help="Optional provider override (aoai/aifoundry).")
+    parser.add_argument("--message", default="Hello from the multi-endpoint resolver.", help="Test message to send.")
+    parser.add_argument("--reasoning-effort", default=None, help="Optional reasoning_effort value.")
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
+    return parser.parse_args()
+
+
+def main():  # entry point: resolve endpoint config from Cosmos, then send one test message
+    args = parse_args()
+    load_dotenv(args.env_path, override=True)  # .env values take precedence over the existing process environment
+    configure_logging(args.verbose)
+
+    logging.info("Loading .env from: %s", args.env_path)
+    settings = fetch_settings_from_cosmos(  # load the settings document that drives endpoint/model resolution
+        database_name=args.database,
+        container_name=args.container,
+        settings_id=args.settings_id
+    )
+
+    gpt_client, gpt_model, provider, endpoint, auth, api_version = resolve_multi_endpoint_gpt_config(
+        settings,
+        model_id=args.model_id,
+        endpoint_id=args.endpoint_id,
+        provider_override=args.provider
+    )
+
+    response_text = run_chat_completion(  # provider/endpoint/auth are forwarded so Foundry retries can rebuild the client
+        gpt_client,
+        gpt_model,
+        message=args.message,
+        reasoning_effort=args.reasoning_effort,
+        provider=provider,
+        endpoint=endpoint,
+        auth=auth,
+        api_version=api_version,
+        settings=settings
+    )
+
+    logging.info("Response text:\n%s", response_text)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ui_tests/requirements.txt b/ui_tests/requirements.txt
new file mode 100644
index 00000000..e5d5c89e
--- /dev/null
+++ b/ui_tests/requirements.txt
@@ -0,0 +1,5 @@
+pytest
+pytest-playwright
+playwright
+azure-mgmt-playwright
+azure-identity
\ No newline at end of file
diff --git a/ui_tests/test_agent_modal_dual_foundry_modes.py b/ui_tests/test_agent_modal_dual_foundry_modes.py
new file mode 100644
index 00000000..9d7bd956
--- /dev/null
+++ b/ui_tests/test_agent_modal_dual_foundry_modes.py
@@ -0,0 +1,70 @@
+# test_agent_modal_dual_foundry_modes.py
+"""
+UI test for dual Foundry agent modal modes.
+Version: 0.239.176
+Implemented in: 0.239.176
+
+This test ensures that the agent modal exposes both Foundry modes and that the
+mode-specific form sections toggle correctly in the browser.
+"""
+
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+@pytest.mark.ui
+def test_agent_modal_dual_foundry_modes(playwright):
+    """Validate Foundry mode toggling in the agent modal."""
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+    browser = playwright.chromium.launch()
+    context = browser.new_context(
+        storage_state=STORAGE_STATE,  # reuse a pre-authenticated session so no login flow is needed
+        viewport={"width": 1440, "height": 900},
+    )
+    page = context.new_page()
+
+    try:
+        page.goto(f"{BASE_URL}/workspace", wait_until="networkidle")
+        expect(page.locator("#agentModal")).to_be_attached()  # the modal markup must exist even before it is shown
+
+        page.evaluate(  # open the agent modal programmatically via the page's Bootstrap runtime
+            """
+            () => {
+                const modalEl = document.getElementById('agentModal');
+                if (!modalEl || !window.bootstrap) {
+                    return;
+                }
+                const modal = new bootstrap.Modal(modalEl);
+                modal.show();
+            }
+            """
+        )
+
+        expect(page.get_by_label("Foundry (classic)")).to_be_visible()
+        expect(page.get_by_label("New Foundry")).to_be_visible()
+
+        page.get_by_label("New Foundry").check()  # switch to the application-based Foundry mode
+        expect(page.locator("#agent-foundry-fetch-btn-label")).to_have_text("Fetch Applications")
+        expect(page.locator("#agent-foundry-select-label")).to_have_text("New Foundry Application")
+        expect(page.locator("#agent-new-foundry-only")).to_be_visible()
+        expect(page.locator("#agent-classic-foundry-only")).to_be_hidden()
+
+        page.get_by_label("Foundry (classic)").check()  # switch back to the classic agent mode
+        expect(page.locator("#agent-foundry-fetch-btn-label")).to_have_text("Fetch Agents")
+        expect(page.locator("#agent-foundry-select-label")).to_have_text("Foundry Agent")
+        expect(page.locator("#agent-classic-foundry-only")).to_be_visible()
+        expect(page.locator("#agent-new-foundry-only")).to_be_hidden()
+    finally:
+        context.close()
+        browser.close()
diff --git a/ui_tests/test_approvals_agent_template_admin_section.py b/ui_tests/test_approvals_agent_template_admin_section.py
new file mode 100644
index 00000000..2d546684
--- /dev/null
+++ b/ui_tests/test_approvals_agent_template_admin_section.py
@@ -0,0 +1,144 @@
+# test_approvals_agent_template_admin_section.py
+"""
+UI test for approvals page agent template admin section.
+Version: 0.239.163
+Implemented in: 0.239.159
+
+This test ensures that the shared approvals page shows the agent template
+approval queue for admins, keeps that section hidden for non-admin users,
+and uses a Bootstrap confirmation modal for template deletion.
+"""
+
+import json
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+ADMIN_STORAGE_STATE = os.getenv("SIMPLECHAT_UI_ADMIN_STORAGE_STATE", "")
+NON_ADMIN_STORAGE_STATE = os.getenv("SIMPLECHAT_UI_NON_ADMIN_STORAGE_STATE", "")
+
+
+def _require_ui_env():  # skip (rather than fail) when the UI test environment is not configured
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not ADMIN_STORAGE_STATE or not Path(ADMIN_STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_ADMIN_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+    if not NON_ADMIN_STORAGE_STATE or not Path(NON_ADMIN_STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_NON_ADMIN_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+
+@pytest.mark.ui
+def test_approvals_agent_template_admin_section(playwright):
+    """Validate admin visibility and Bootstrap delete confirmation behavior on /approvals."""
+    _require_ui_env()
+
+    browser = playwright.chromium.launch()
+    admin_context = browser.new_context(
+        storage_state=ADMIN_STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+    user_context = browser.new_context(  # second session exercises the non-admin view in the same run
+        storage_state=NON_ADMIN_STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+
+    admin_page = admin_context.new_page()
+    user_page = user_context.new_page()
+
+    approvals_payload = {  # empty shared approvals queue so the page renders deterministically
+        "success": True,
+        "approvals": [],
+        "total_count": 0,
+        "page": 1,
+        "page_size": 20,
+        "total_pages": 0,
+    }
+    templates_payload = {  # one pending template drives the table row under test
+        "templates": [
+            {
+                "id": "template-1",
+                "title": "Pending Template",
+                "display_name": "Pending Template",
+                "helper_text": "A pending admin review template",
+                "description": "Pending admin review template",
+                "status": "pending",
+                "created_by_name": "Template Submitter",
+                "created_by_email": "template-user@example.com",
+                "updated_at": "2026-03-24T10:00:00Z",
+            }
+        ]
+    }
+    template_detail_payload = {
+        "template": {
+            "id": "template-1",
+            "title": "Pending Template",
+            "display_name": "Pending Template",
+            "helper_text": "A pending admin review template",
+            "description": "Pending admin review template",
+            "instructions": "Review these instructions.",
+            "status": "pending",
+            "created_by_name": "Template Submitter",
+            "created_by_email": "template-user@example.com",
+            "created_at": "2026-03-24T09:00:00Z",
+            "updated_at": "2026-03-24T10:00:00Z",
+            "tags": ["approval"],
+            "actions_to_load": []
+        }
+    }
+
+    def fulfill_approvals(route):
+        route.fulfill(status=200, content_type="application/json", body=json.dumps(approvals_payload))
+
+    def fulfill_templates(route):
+        route.fulfill(status=200, content_type="application/json", body=json.dumps(templates_payload))
+
+    def fulfill_template_detail(route):
+        route.fulfill(status=200, content_type="application/json", body=json.dumps(template_detail_payload))
+
+    try:
+        dialog_seen = {"value": False}  # flips to True if any native browser dialog appears
+
+        def on_dialog(dialog):  # a native dialog would mean the Bootstrap modal was bypassed
+            dialog_seen["value"] = True
+            dialog.dismiss()
+
+        admin_page.on("dialog", on_dialog)
+        admin_page.route("**/api/approvals?*", fulfill_approvals)
+        admin_page.route("**/api/admin/agent-templates?*", fulfill_templates)
+        admin_page.route("**/api/admin/agent-templates/template-1", fulfill_template_detail)
+        admin_page.goto(f"{BASE_URL}/approvals", wait_until="networkidle")
+
+        admin_section = admin_page.locator("#agent-template-approvals")
+        expect(admin_section).to_be_visible()
+        expect(admin_page.locator("#agent-template-table-body")).to_contain_text("Pending Template")
+
+        admin_page.get_by_role("button", name="Delete").first.click()
+        expect(admin_page.locator("#agentTemplateDeleteConfirmModal")).to_be_visible()
+        expect(admin_page.locator("#agent-template-delete-confirm-name")).to_have_text("Pending Template")
+        expect(admin_page.locator("#agentTemplateDeleteConfirmModal")).to_contain_text("This action cannot be undone.")
+        assert dialog_seen["value"] is False  # deletion must use the Bootstrap modal, not window.confirm
+        admin_page.locator("#agentTemplateDeleteConfirmModal").get_by_role("button", name="Cancel").click()
+        expect(admin_page.locator("#agentTemplateDeleteConfirmModal")).to_be_hidden()
+
+        admin_page.get_by_role("button", name="View").click()  # same confirm flow, entered from the review modal
+        expect(admin_page.locator("#agentTemplateReviewModal")).to_be_visible()
+        admin_page.locator("#agent-template-delete-btn").click()
+        expect(admin_page.locator("#agentTemplateDeleteConfirmModal")).to_be_visible()
+        expect(admin_page.locator("#agentTemplateReviewModal")).to_be_hidden()
+        assert dialog_seen["value"] is False
+        admin_page.locator("#agentTemplateDeleteConfirmModal").get_by_role("button", name="Cancel").click()
+        expect(admin_page.locator("#agentTemplateDeleteConfirmModal")).to_be_hidden()
+        expect(admin_page.locator("#agentTemplateReviewModal")).to_be_visible()
+
+        user_page.route("**/api/approvals?*", fulfill_approvals)
+        user_page.goto(f"{BASE_URL}/approvals", wait_until="networkidle")
+
+        expect(user_page.locator("#agent-template-approvals")).to_have_count(0)  # non-admins never get the section rendered
+    finally:
+        admin_context.close()
+        user_context.close()
+        browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_control_center_token_filters.py b/ui_tests/test_control_center_token_filters.py
new file mode 100644
index 00000000..8ee75053
--- /dev/null
+++ b/ui_tests/test_control_center_token_filters.py
@@ -0,0 +1,107 @@
+# test_control_center_token_filters.py
+"""
+UI test for control center token filters.
+Version: 0.239.164
+Implemented in: 0.239.164
+
+This test ensures the token filter controls appear on the Control Center
+dashboard and that applying them forwards the selected values to the
+activity-trends request.
+"""
+
+import os
+from pathlib import Path
+from urllib.parse import parse_qs, urlparse
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+@pytest.mark.ui
+def test_control_center_token_filters_forward_query_params(playwright):
+    """Validate token filter controls toggle correctly and update the trends request."""
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+    browser = playwright.chromium.launch()
+    context = browser.new_context(
+        storage_state=STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+    page = context.new_page()
+    captured_queries = []  # parsed query strings from every intercepted activity-trends request
+
+    def handle_token_filters(route):  # stub filter metadata so the select controls have known options
+        route.fulfill(
+            status=200,
+            content_type="application/json",
+            body="""
+            {
+              "success": true,
+              "filters": {
+                "users": [{"id": "user-1", "label": "Ada Lovelace", "display_name": "Ada Lovelace", "email": "ada@example.com"}],
+                "groups": [{"id": "group-1", "name": "Research Group"}],
+                "public_workspaces": [{"id": "workspace-1", "name": "Public Knowledge"}],
+                "models": [{"value": "gpt-4o", "label": "gpt-4o"}],
+                "workspace_types": [
+                  {"value": "personal", "label": "Personal"},
+                  {"value": "group", "label": "Group"},
+                  {"value": "public", "label": "Public"}
+                ],
+                "token_types": [
+                  {"value": "chat", "label": "Chat"},
+                  {"value": "embedding", "label": "Embedding"}
+                ]
+              }
+            }
+            """
+        )
+
+    def handle_activity_trends(route):  # record the forwarded query params, then return an empty dataset
+        query = parse_qs(urlparse(route.request.url).query)
+        captured_queries.append(query)
+        route.fulfill(
+            status=200,
+            content_type="application/json",
+            body='{"success": true, "activity_data": {"logins": {}, "chats": {}, "documents": {}, "personal_documents": {}, "group_documents": {}, "public_documents": {}, "tokens": {}}, "period": "30 days", "start_date": "2026-03-01T00:00:00", "end_date": "2026-03-30T23:59:59"}'
+        )
+
+    try:
+        page.route("**/api/admin/control-center/token-filters", handle_token_filters)
+        page.route("**/api/admin/control-center/activity-trends*", handle_activity_trends)
+
+        page.goto(f"{BASE_URL}/admin/control-center", wait_until="networkidle")
+
+        expect(page.locator("#tokenUserFilter")).to_be_visible()
+        expect(page.locator("#tokenWorkspaceTypeFilter")).to_be_visible()
+        expect(page.locator("#tokenModelFilter")).to_be_visible()
+        expect(page.locator("#tokenTypeFilter")).to_be_visible()
+
+        page.locator("#tokenWorkspaceTypeFilter").select_option("group")  # "group" reveals the group sub-filter only
+        expect(page.locator("#tokenGroupFilterContainer")).to_be_visible()
+        expect(page.locator("#tokenPublicWorkspaceFilterContainer")).to_be_hidden()
+
+        page.locator("#tokenUserFilter").select_option("user-1")
+        page.locator("#tokenGroupFilter").select_option("group-1")
+        page.locator("#tokenModelFilter").select_option("gpt-4o")
+        page.locator("#tokenTypeFilter").select_option("chat")
+
+        with page.expect_response(lambda response: "/api/admin/control-center/activity-trends?" in response.url):
+            page.locator("#tokenApplyFiltersBtn").click()
+
+        assert captured_queries, "Expected at least one activity trends request"
+        applied_query = captured_queries[-1]  # parse_qs yields lists, hence the single-element comparisons below
+        assert applied_query.get("user_id") == ["user-1"]
+        assert applied_query.get("workspace_type") == ["group"]
+        assert applied_query.get("group_id") == ["group-1"]
+        assert applied_query.get("model") == ["gpt-4o"]
+        assert applied_query.get("token_type") == ["chat"]
+    finally:
+        context.close()
+        browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_model_endpoint_request_uses_endpoint_id.py b/ui_tests/test_model_endpoint_request_uses_endpoint_id.py
new file mode 100644
index 00000000..12f5710a
--- /dev/null
+++ b/ui_tests/test_model_endpoint_request_uses_endpoint_id.py
@@ -0,0 +1,88 @@
+# test_model_endpoint_request_uses_endpoint_id.py
+"""
+UI test for model endpoint request identity wiring.
+Version: 0.239.178
+Implemented in: 0.239.178
+
+This test ensures the admin multi-endpoint modal exposes the supported
+providers, shows the APIM provider guidance, and sends the endpoint ID in the
+test-model request payload so the backend can resolve Key Vault-backed secrets.
+"""
+
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+@pytest.mark.ui
+def test_model_endpoint_request_uses_endpoint_id(playwright):
+    """Validate that the endpoint modal includes the endpoint ID in test requests."""
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+    browser = playwright.chromium.launch()
+    context = browser.new_context(
+        storage_state=STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+    page = context.new_page()
+    captured_request = {}  # filled by the route handler with the test-model POST body
+
+    def handle_test_request(route):
+        post_data = route.request.post_data_json or {}
+        captured_request.update(post_data)
+        route.fulfill(
+            status=200,
+            content_type="application/json",
+            body='{"success": true}',
+        )
+
+    try:
+        page.goto(f"{BASE_URL}/admin/settings", wait_until="networkidle")
+        expect(page.locator("#add-model-endpoint-btn")).to_be_visible()
+
+        page.route("**/api/models/test-model", handle_test_request)  # intercept before the test-model click below
+
+        page.locator("#add-model-endpoint-btn").click()
+        expect(page.locator("#modelEndpointModal")).to_be_visible()
+
+        provider_options = page.locator("#model-endpoint-provider option").all_text_contents()
+        assert provider_options == ["Azure OpenAI", "Foundry (classic)", "New Foundry"]  # exact order and labels
+        expect(page.get_by_text("If using classic Foundry, use Foundry (classic). If using the application-based runtime, use New Foundry.")).to_be_visible()
+
+        page.locator("#model-endpoint-provider").select_option("new_foundry")
+        expect(page.locator("#model-endpoint-openai-api-version")).to_have_value("")  # new_foundry clears the api-version field
+
+        page.evaluate(  # simulate editing a previously stored endpoint by injecting its ID
+            """
+            () => {
+                const endpointId = document.getElementById('model-endpoint-id');
+                if (endpointId) {
+                    endpointId.value = 'stored-endpoint-123';
+                }
+            }
+            """
+        )
+        page.locator("#model-endpoint-provider").select_option("aoai")
+        page.locator("#model-endpoint-name").fill("Stored Endpoint")
+        page.locator("#model-endpoint-endpoint").fill("https://example.openai.azure.com")
+        page.locator("#model-endpoint-auth-type").select_option("api_key")
+        page.locator("#model-endpoint-api-key").fill("temporary-ui-secret")
+        page.locator("#model-endpoint-add-model-btn").click()
+        page.locator("input[data-deployment-name-for]").first.fill("gpt-4o")
+        page.locator("button[data-action='test-model']").first.click()
+
+        expect(page.locator("#modelEndpointModal")).to_be_visible()
+        assert captured_request.get("id") == "stored-endpoint-123"  # backend needs the ID to resolve Key Vault secrets
+        assert captured_request.get("model", {}).get("deploymentName") == "gpt-4o"
+    finally:
+        context.close()
+        browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_streaming_thought_progression.py b/ui_tests/test_streaming_thought_progression.py
new file mode 100644
index 00000000..264635de
--- /dev/null
+++ b/ui_tests/test_streaming_thought_progression.py
@@ -0,0 +1,127 @@
+# test_streaming_thought_progression.py
+"""
+UI test for streaming thought progression.
+Version: 0.239.185
+Implemented in: 0.239.185
+
+This test ensures the live streaming placeholder keeps advancing to the latest
+thought for the active assistant message and does not inherit stale thought
+state when a new message starts.
+"""
+
+import os
+from pathlib import Path
+
+import pytest
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+def _require_ui_env():  # skip (rather than fail) when the UI test environment is not configured
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+
+@pytest.mark.ui
+def test_streaming_thought_progression_and_session_isolation(playwright):
+ """Validate live thought progression and stale-session isolation in the browser."""
+ _require_ui_env()
+
+ browser = playwright.chromium.launch()
+ context = browser.new_context(
+ storage_state=STORAGE_STATE,
+ viewport={"width": 1440, "height": 900},
+ )
+ page = context.new_page()
+
+ try:
+ page.goto(f"{BASE_URL}/chats", wait_until="domcontentloaded")
+
+ result = page.evaluate(
+ """
+ async () => {
+ const thoughtsModule = await import('/static/js/chat/chat-thoughts.js');
+ const {
+ beginStreamingThoughtSession,
+ clearStreamingThoughtSession,
+ handleStreamingThought,
+ markStreamingThoughtContentStarted,
+ } = thoughtsModule;
+
+ function createPlaceholder(messageId, initialText = 'Streaming...') {
+ const wrapper = document.createElement('div');
+ wrapper.setAttribute('data-message-id', messageId);
+ wrapper.innerHTML = `
${initialText}
`;
+ document.body.appendChild(wrapper);
+ return wrapper;
+ }
+
+ const oldPlaceholder = createPlaceholder('temp-old');
+ beginStreamingThoughtSession('temp-old');
+ handleStreamingThought({
+ message_id: 'assistant-old',
+ step_index: 0,
+ step_type: 'generation',
+ content: 'Old thought'
+ }, 'temp-old');
+
+ clearStreamingThoughtSession('temp-old');
+
+ const newPlaceholder = createPlaceholder('temp-new');
+ beginStreamingThoughtSession('temp-new');
+ const beforeThought = newPlaceholder.querySelector('.message-text').textContent;
+
+ handleStreamingThought({
+ message_id: 'assistant-new',
+ step_index: 0,
+ step_type: 'search',
+ content: 'Searching for current reply'
+ }, 'temp-new');
+ const afterFirstThought = newPlaceholder.querySelector('.message-text').textContent;
+
+ handleStreamingThought({
+ message_id: 'assistant-new',
+ step_index: 1,
+ step_type: 'generation',
+ content: 'Preparing final answer'
+ }, 'temp-new');
+ const afterSecondThought = newPlaceholder.querySelector('.message-text').textContent;
+
+ markStreamingThoughtContentStarted('temp-new');
+ handleStreamingThought({
+ message_id: 'assistant-new',
+ step_index: 2,
+ step_type: 'generation',
+ content: 'Late thought should be ignored'
+ }, 'temp-new');
+ const afterContentStarted = newPlaceholder.querySelector('.message-text').textContent;
+
+ return {
+ beforeThought,
+ afterFirstThought,
+ afterSecondThought,
+ afterContentStarted,
+ oldPlaceholderText: oldPlaceholder.querySelector('.message-text').textContent,
+ serverMessageId: newPlaceholder.dataset.streamingServerMessageId || null,
+ thoughtIndexAfterContent: newPlaceholder.dataset.streamingThoughtIndex || null,
+ };
+ }
+ """
+ )
+
+ assert result['beforeThought'] == 'Streaming...'
+ assert 'Searching for current reply' in result['afterFirstThought']
+ assert 'Old thought' not in result['afterFirstThought']
+ assert 'Preparing final answer' in result['afterSecondThought']
+ assert 'Searching for current reply' not in result['afterSecondThought']
+ assert result['afterContentStarted'] == result['afterSecondThought']
+ assert result['oldPlaceholderText'] != result['afterFirstThought']
+ assert result['serverMessageId'] == 'assistant-new'
+ assert result['thoughtIndexAfterContent'] is None
+ finally:
+ context.close()
+ browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_upload_agreement_dark_mode.py b/ui_tests/test_upload_agreement_dark_mode.py
new file mode 100644
index 00000000..78a01d0f
--- /dev/null
+++ b/ui_tests/test_upload_agreement_dark_mode.py
@@ -0,0 +1,82 @@
+# test_upload_agreement_dark_mode.py
+"""
+UI test for upload agreement dark mode readability.
+Version: 0.239.166
+Implemented in: 0.239.166
+
+This test ensures the upload agreement modal uses the dark-mode-safe light
+surface styling so the agreement text remains readable when the app theme is
+dark.
+"""
+
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+@pytest.mark.ui
+def test_upload_agreement_modal_uses_dark_mode_safe_surface(playwright):
+    """Validate the upload agreement content surface remains readable in dark mode."""
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+    browser = playwright.chromium.launch()
+    context = browser.new_context(
+        storage_state=STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+    page = context.new_page()
+
+    try:
+        page.goto(f"{BASE_URL}/workspace", wait_until="networkidle")
+
+        if page.locator("#userAgreementUploadModal").count() == 0:  # feature may be disabled per environment
+            pytest.skip("User agreement upload modal is not enabled in this environment.")
+
+        page.evaluate(  # force dark theme, seed sample text, and open the modal
+            """
+            () => {
+                document.documentElement.setAttribute('data-bs-theme', 'dark');
+                const content = document.getElementById('userAgreementUploadContent');
+                if (content) {
+                    content.textContent = 'Sample agreement content for dark mode verification.';
+                }
+                const modalEl = document.getElementById('userAgreementUploadModal');
+                if (modalEl && window.bootstrap) {
+                    bootstrap.Modal.getOrCreateInstance(modalEl).show();
+                }
+            }
+            """
+        )
+
+        expect(page.locator("#userAgreementUploadModal")).to_be_visible()
+        content_class_name = page.locator("#userAgreementUploadContent").evaluate(
+            "node => node.className"
+        )
+        assert "bg-light" in content_class_name  # the dark-mode-safe surface class must be present
+
+        colors = page.locator("#userAgreementUploadContent").evaluate(
+            """
+            node => {
+                const styles = getComputedStyle(node);
+                return {
+                    background: styles.backgroundColor,
+                    color: styles.color
+                };
+            }
+            """
+        )
+
+        assert colors["background"] != "rgb(248, 249, 250)"  # Bootstrap's light-mode bg-light value must be overridden
+        assert colors["background"] != colors["color"]  # text must not blend into the background
+    finally:
+        context.close()
+        browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_workspace_agent_views_consistency.py b/ui_tests/test_workspace_agent_views_consistency.py
new file mode 100644
index 00000000..e98330f7
--- /dev/null
+++ b/ui_tests/test_workspace_agent_views_consistency.py
@@ -0,0 +1,125 @@
+# test_workspace_agent_views_consistency.py
+"""
+UI test for workspace agent view consistency.
+Version: 0.239.158
+Implemented in: 0.239.157
+
+This test ensures that the personal workspace agents table uses the same
+action ordering as the group workspace table and that group agent grid cards
+show edit and delete actions when the current user can manage group agents.
+"""
+
+import os
+import json
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+def _require_ui_env():  # skip (rather than fail) when the UI test environment is not configured
+    if not BASE_URL:
+        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+    if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+        pytest.skip("Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+
+@pytest.mark.ui
+def test_workspace_agent_views_consistency(playwright):
+    """Validate personal table ordering and group grid management actions."""
+    _require_ui_env()
+
+    browser = playwright.chromium.launch()
+    context = browser.new_context(
+        storage_state=STORAGE_STATE,
+        viewport={"width": 1440, "height": 900},
+    )
+    page = context.new_page()
+
+    user_agents_payload = [  # single personal agent row for the table-ordering check
+        {
+            "name": "researcher_agent",
+            "display_name": "Researcher",
+            "description": "Personal research agent",
+            "is_global": False,
+        }
+    ]
+    group_agents_payload = {  # single group agent card for the grid-actions check
+        "agents": [
+            {
+                "id": "group-agent-1",
+                "name": "ga_ge",
+                "display_name": "Ga Ge",
+                "description": "A group agent using shared tools",
+                "is_global": False,
+            }
+        ]
+    }
+
+    def handle_user_agents(route):
+        route.fulfill(
+            status=200,
+            content_type="application/json",
+            body=json.dumps(user_agents_payload),
+        )
+
+    def handle_group_agents(route):
+        route.fulfill(
+            status=200,
+            content_type="application/json",
+            body=json.dumps(group_agents_payload),
+        )
+
+    try:
+        page.route("**/api/user/agents", handle_user_agents)
+        page.goto(f"{BASE_URL}/workspace", wait_until="networkidle")
+        page.get_by_role("tab", name="Your Agents").click()
+
+        personal_row = page.locator("#agents-table-body tr").first
+        expect(personal_row).to_be_visible()
+        action_buttons = personal_row.locator("td").nth(2).locator("button")
+        expect(action_buttons).to_have_count(4)
+        button_classes = action_buttons.evaluate_all("elements => elements.map((element) => element.className)")
+        assert "view-agent-btn" in button_classes[0]  # ordering must match the group workspace table
+        assert "chat-agent-btn" in button_classes[1]
+        assert "edit-agent-btn" in button_classes[2]
+        assert "delete-agent-btn" in button_classes[3]
+
+        page.route("**/api/group/agents", handle_group_agents)
+        page.goto(f"{BASE_URL}/group_workspaces", wait_until="networkidle")
+        page.get_by_role("tab", name="Group Agents").click()
+        page.evaluate(  # inject a group context where the user can manage agents, then refetch
+            """
+            () => {
+                window.currentGroupStatus = 'active';
+                window.groupWorkspaceContext = {
+                    activeGroupId: 'test-group-1',
+                    activeGroupName: 'Test Group',
+                    userRole: 'Owner',
+                    requireOwnerForAgentManagement: false
+                };
+                window.dispatchEvent(new CustomEvent('groupWorkspace:context-changed', {
+                    detail: window.groupWorkspaceContext
+                }));
+                if (typeof window.fetchGroupAgents === 'function') {
+                    return window.fetchGroupAgents();
+                }
+                return null;
+            }
+            """
+        )
+
+        page.locator("label[for='groupAgents-view-grid']").click()  # switch from table to grid view
+        group_card = page.locator("#group-agents-grid-view .item-card").first
+        expect(group_card).to_be_visible()
+        expect(group_card.locator(".item-card-chat-btn")).to_be_visible()
+        expect(group_card.locator(".item-card-view-btn")).to_be_visible()
+        expect(group_card.locator(".item-card-edit-btn")).to_be_visible()  # managers must see edit/delete on cards
+        expect(group_card.locator(".item-card-delete-btn")).to_be_visible()
+    finally:
+        context.close()
+        browser.close()
\ No newline at end of file