# Hugging Face Spaces status banner captured during scraping (not code):
# Spaces: Sleeping
import streamlit as st
import json
import os
from document_processor import DocumentProcessor
from langgraph_agent import SoilAnalysisAgent
from crewai_agents import CrewAIGeotechSystem
from soil_visualizer import SoilProfileVisualizer

# Configuration helpers are mandatory; surface a visible error and halt the
# Streamlit script if config.py is missing or broken, since nothing below
# can run without provider/model metadata.
try:
    from config import (
        LLM_PROVIDERS, AVAILABLE_MODELS,
        get_available_providers, get_models_for_provider,
        get_default_provider_and_model, get_api_key
    )
except ImportError as e:
    st.error(f"Configuration import error: {e}")
    st.stop()

# Page config must be the first st.* UI call of the script run.
st.set_page_config(
    page_title="Soil Boring Log Analyzer",
    page_icon="ποΈ",
    layout="wide",
    initial_sidebar_state="expanded"
)
def setup_llm_provider_gui():
    """Render the provider selector and session-only API-key input.

    Side effects on st.session_state:
        temp_api_key_<provider>: stored when the entered key passes the
            format check; deleted when the input is empty or invalid.
        selected_provider: set to the chosen provider id on a valid key.

    Returns:
        tuple: (provider_id, api_key_input) -- note the raw widget value is
        returned even when it failed validation; callers should gate on
        is_provider_configured() rather than on this return value.
    """
    st.subheader("π LLM Provider Setup")
    st.info("π‘ API keys are used temporarily for this session only and are not saved permanently.")
    # Provider selection: map human-readable name -> provider id so the
    # selectbox can show names while we keep working with ids.
    provider_options = {provider_info['name']: provider_id
                        for provider_id, provider_info in LLM_PROVIDERS.items()}
    selected_provider_name = st.selectbox(
        "Select LLM Provider:",
        options=list(provider_options.keys()),
        help="Choose your preferred LLM provider"
    )
    selected_provider = provider_options[selected_provider_name]
    provider_info = LLM_PROVIDERS[selected_provider]
    st.markdown(f"**{provider_info['description']}**")
    # API key input: keyed per provider so switching providers preserves
    # each provider's previously entered key for this session.
    session_key = f"temp_api_key_{selected_provider}"
    current_key = st.session_state.get(session_key, "")
    api_key_input = st.text_input(
        f"Enter your {provider_info['name']} API Key:",
        value=current_key,
        type="password",
        placeholder=get_api_key_placeholder(selected_provider),
        help=f"{get_provider_help_text(selected_provider)} (Temporary use only - not saved)",
        key=f"api_key_input_{selected_provider}"
    )
    # Validate the key format and mirror a valid key into session state;
    # an invalid or empty input removes any previously stored key so stale
    # credentials are never reused.
    if api_key_input:
        if validate_api_key_format(selected_provider, api_key_input):
            st.session_state[session_key] = api_key_input
            st.session_state['selected_provider'] = selected_provider
            st.success(f"β {provider_info['name']} API key ready for use")
            # Show masked key so the user can confirm which key is active.
            masked_key = mask_api_key(api_key_input)
            st.info(f"π Current key: {masked_key}")
        else:
            st.error(f"β Invalid API key format for {provider_info['name']}")
            if session_key in st.session_state:
                del st.session_state[session_key]
    else:
        st.warning(f"β οΈ Please enter your {provider_info['name']} API key to continue")
        if session_key in st.session_state:
            del st.session_state[session_key]
    return selected_provider, api_key_input
def get_current_provider_and_model():
    """Resolve the active (provider, model) pair.

    Prefers values stored in st.session_state; falls back to the first
    configured provider and, for that provider, its first available model.
    Either element may be None when nothing is configured/available.
    """
    provider = st.session_state.get('selected_provider')
    if not provider:
        # Fall back to the first provider declared in the config mapping.
        provider = next(iter(LLM_PROVIDERS), None)
    model = st.session_state.get('selected_model')
    if provider and not model:
        # Fall back to the provider's first listed model, if any.
        models = get_models_for_provider(provider)
        model = next(iter(models), None) if models else None
    return provider, model
def get_api_key_for_current_provider():
    """Return the session-stored API key for the active provider.

    Returns an empty string when no provider is resolved or no key was
    entered for it this session.
    """
    provider, _unused_model = get_current_provider_and_model()
    if not provider:
        return ""
    return st.session_state.get(f"temp_api_key_{provider}", "")
def is_provider_configured():
    """Return True when the active provider has a non-blank API key stored."""
    key = get_api_key_for_current_provider()
    return bool(key) and bool(key.strip())
def get_api_key_placeholder(provider_id):
    """Return an example-format placeholder for the provider's key field.

    Falls back to a generic prompt for providers without a known prefix.
    """
    if provider_id == "openrouter":
        return "sk-or-v1-..."
    if provider_id == "anthropic":
        return "sk-ant-..."
    if provider_id == "google":
        return "AIza..."
    return "Enter your API key..."
def get_provider_help_text(provider_id):
    """Return a hint telling the user where to obtain the provider's key.

    Returns an empty string for unrecognized providers.
    """
    key_urls = {
        "openrouter": "https://openrouter.ai/keys",
        "anthropic": "https://console.anthropic.com/",
        "google": "https://aistudio.google.com/app/apikey",
    }
    url = key_urls.get(provider_id)
    return f"Get your API key from {url}" if url else ""
def validate_api_key_format(provider_id, api_key):
    """Shallow prefix check on an API key (no network verification).

    Empty keys always fail; keys for providers without a known prefix
    pattern are accepted so new providers are not locked out.
    """
    if not api_key:
        return False
    if provider_id == "openrouter":
        return api_key.startswith("sk-or-")
    if provider_id == "anthropic":
        return api_key.startswith("sk-ant-")
    if provider_id == "google":
        # Google keys come in two known shapes.
        return api_key.startswith(("AIza", "GoogleAPIKey"))
    return True
def mask_api_key(api_key):
    """Return a display-safe representation of an API key.

    Long keys (>12 chars) show their first 8 and last 4 characters; short
    non-empty keys are fully hidden; falsy input reads "Not configured".
    """
    if not api_key:
        return "Not configured"
    if len(api_key) <= 12:
        return "***configured***"
    return f"{api_key[:8]}...{api_key[-4:]}"
def initialize_crewai_system():
    """(Re)create the CrewAI system in session state from current settings.

    No-op when neither a provider nor a model can be resolved. A blank or
    whitespace-only API key is normalized to "" deliberately, which the
    downstream system treats as a signal to run in mock mode.
    """
    provider, model = get_current_provider_and_model()
    if not (provider and model):
        return
    chosen_model = st.session_state.get('selected_model', model)
    api_key = get_api_key_for_current_provider()
    # Normalize blank keys to "" so mock mode is triggered downstream.
    if not (api_key and api_key.strip()):
        api_key = ""
    st.session_state.crewai_system = CrewAIGeotechSystem(
        model=chosen_model,
        api_key=api_key,
    )
def run_crewai_analysis(text_content, image_base64, merge_similar, split_thick):
    """Run the two-stage CrewAI workflow: unified extraction, then review.

    First runs UnifiedSoilWorkflow to extract soil layers from the document
    text/image, then feeds that result to the CrewAI system for the
    two-agent geotechnical analysis. Stores the combined package in
    st.session_state.analysis_results for display_analysis_results().

    Args:
        text_content: extracted document text (may be empty for image-only).
        image_base64: base64-encoded document image, or None.
        merge_similar: forwarded flag to merge similar layers.
        split_thick: forwarded flag to split thick layers.

    Returns:
        The CrewAI results dict on success, or None on any error (errors
        are reported to the UI via st.error rather than raised).
    """
    try:
        from unified_soil_workflow import UnifiedSoilWorkflow
        workflow = UnifiedSoilWorkflow()
        provider, model = get_current_provider_and_model()
        selected_model = st.session_state.get('selected_model', model)
        current_api_key = get_api_key_for_current_provider()
        # Stage 1: extract initial soil data with the unified workflow.
        soil_data = workflow.analyze_soil_boring_log(
            text_content=text_content,
            image_base64=image_base64,
            model=selected_model,
            api_key=current_api_key,
            merge_similar=merge_similar,
            split_thick=split_thick
        )
        if "error" in soil_data:
            st.error(f"β Initial Analysis Error: {soil_data['error']}")
            return None
        # Re-initialize CrewAI system with current settings (provider/model
        # may have changed since the last run).
        initialize_crewai_system()
        # Show warning if using mock mode (no API key available).
        if not current_api_key or current_api_key.strip() == "":
            st.warning("β οΈ No API key available. Using mock analysis for demonstration purposes.")
        # Stage 2: run the CrewAI two-agent analysis on the extracted data.
        crewai_results = st.session_state.crewai_system.run_geotechnical_analysis(soil_data)
        # Package results in the shape display_analysis_results() expects.
        analysis_results = {
            "soil_data": soil_data,
            "analysis_results": {
                "validation_stats": soil_data.get("validation_stats", {}),
                "optimization": soil_data.get("optimization_results", {}),
                "crewai_analysis": crewai_results
            }
        }
        st.session_state.analysis_results = analysis_results
        # Report outcome based on the CrewAI workflow status.
        layer_count = len(soil_data.get("soil_layers", []))
        workflow_status = crewai_results.get("status", "unknown")
        if workflow_status == "completed_with_revision":
            st.success(f"π CrewAI analysis completed with quality control revision! Found {layer_count} soil layers")
            st.info("π Senior engineer review required re-investigation - final analysis is more accurate")
        elif workflow_status == "error":
            st.error(f"β CrewAI analysis failed: {crewai_results.get('error', 'Unknown error')}")
        else:
            st.success(f"π CrewAI analysis completed! Found {layer_count} soil layers")
            st.info("β Analysis passed senior engineer review on first attempt")
        return crewai_results
    except Exception as e:
        st.error(f"β CrewAI workflow error: {str(e)}")
        return None
def run_langgraph_analysis(text_content, image_base64):
    """Run the single-agent LangGraph analysis and cache its results.

    The agent output is stored in st.session_state.analysis_results (so the
    results tabs can render it) and also returned to the caller.
    """
    results = st.session_state.agent.run_analysis(
        text_content=text_content,
        image_base64=image_base64,
    )
    st.session_state.analysis_results = results
    return results
def run_unified_workflow_analysis(text_content, image_base64, merge_similar, split_thick):
    """Run the unified soil-analysis workflow and publish results to the UI.

    On success, stores a display-compatible package in
    st.session_state.analysis_results and shows summary messages. On
    failure, reports the workflow's error details (raw LLM response and
    per-item errors when present) and returns without updating state.

    Args:
        text_content: extracted document text (may be empty for image-only).
        image_base64: base64-encoded document image, or None.
        merge_similar: forwarded flag to merge similar layers.
        split_thick: forwarded flag to split thick layers.
    """
    from unified_soil_workflow import UnifiedSoilWorkflow
    # Initialize workflow
    workflow = UnifiedSoilWorkflow()
    # Get configuration (model/key resolved from session state).
    provider, model = get_current_provider_and_model()
    selected_model = st.session_state.get('selected_model', model)
    current_api_key = get_api_key_for_current_provider()
    # Run unified workflow
    soil_data = workflow.analyze_soil_boring_log(
        text_content=text_content,
        image_base64=image_base64,
        model=selected_model,
        api_key=current_api_key,
        merge_similar=merge_similar,
        split_thick=split_thick
    )
    # Check if analysis was successful; surface diagnostics when it wasn't.
    if "error" in soil_data:
        st.error(f"β Unified Workflow Error: {soil_data['error']}")
        if "raw_response" in soil_data:
            with st.expander("π View Raw LLM Response"):
                st.text(soil_data["raw_response"])
        if "errors" in soil_data:
            st.error("Detailed errors:")
            for error in soil_data["errors"]:
                st.error(f" β’ {error}")
        return
    # Package results for display (compatible with existing UI).
    analysis_results = {
        "soil_data": soil_data,
        "analysis_results": {
            "validation_stats": soil_data.get("validation_stats", {}),
            "optimization": soil_data.get("optimization_results", {})
        }
    }
    st.session_state.analysis_results = analysis_results
    # Display success message with workflow metadata (sample counts, steps).
    workflow_meta = soil_data.get("workflow_metadata", {})
    layer_count = len(soil_data.get("soil_layers", []))
    ss_count = workflow_meta.get("ss_samples", 0)
    st_count = workflow_meta.get("st_samples", 0)
    st.success(f"π Unified workflow completed! Found {layer_count} soil layers")
    st.info(f"π Processing: {ss_count} SS samples, {st_count} ST samples, {workflow_meta.get('processing_steps', 9)} workflow steps")
def main():
    """Top-level Streamlit page: sidebar configuration plus analysis flow.

    Layout:
      1. Header and configuration status banner.
      2. Sidebar: provider/API-key setup, file upload, analysis options,
         model selection, reset, workflow info, and a sample-data demo.
         Returns early if no valid API key is configured.
      3. Lazy initialization of processing components in session state.
      4. Document preview plus the analyze button dispatching to CrewAI,
         LangGraph, or the unified workflow.
      5. Rendering of any stored analysis results.
    """
    st.title("ποΈ Soil Boring Log Analyzer")
    st.markdown("Upload soil boring logs (PDF/Image) to automatically extract and analyze soil layers using AI")
    # Show system status banner based on whether an API key is present.
    if is_provider_configured():
        provider, _ = get_current_provider_and_model()
        if provider:
            provider_name = LLM_PROVIDERS[provider]["name"]
            st.success(f"β **Ready to use** - Using {provider_name} (API key provided)")
    else:
        st.info("π§ **Setup Required** - Please enter your API key in the sidebar to start analyzing soil boring logs")
    # LLM Provider Management in Sidebar
    with st.sidebar:
        selected_provider, api_key = setup_llm_provider_gui()
        # Only show rest of sidebar if API key is provided.
        if not is_provider_configured():
            st.warning("β οΈ Please enter a valid API key above to continue")
            return
        st.markdown("---")
        st.header("Upload Document")
        uploaded_file = st.file_uploader(
            "Choose a soil boring log file",
            type=['pdf', 'png', 'jpg', 'jpeg'],
            help="Upload PDF or image file of soil boring log"
        )
        st.header("Analysis Options")
        merge_similar = st.checkbox("Merge similar layers", value=True)
        split_thick = st.checkbox("Split thick layers", value=True)
        st.subheader("π€ Analysis Method")
        analysis_method = st.radio(
            "Choose analysis approach:",
            ["CrewAI (Two-Agent System)", "LangGraph (Single Agent)", "Unified Workflow"],
            help="CrewAI uses two specialized agents with quality control"
        )
        # Model selection for selected provider
        st.subheader("π€ Model Selection")
        if selected_provider:
            available_models = get_models_for_provider(selected_provider)
            if available_models:
                # Create labeled model options for this provider; labels carry
                # cost, recommendation, and image-support markers.
                model_options = {}
                for model_id, model_info in available_models.items():
                    label = f"{model_info['name']} ({model_info['cost']} cost)"
                    if model_info['recommended']:
                        label += " β"
                    if not model_info.get('supports_images', False):
                        label += " π"
                    model_options[label] = model_id
                # Default model selection: prefer the model already chosen in
                # this session, otherwise the first option.
                current_model = st.session_state.get('selected_model')
                default_model_label = None
                if current_model and current_model in available_models:
                    for label, model_id in model_options.items():
                        if model_id == current_model:
                            default_model_label = label
                            break
                if not default_model_label and model_options:
                    default_model_label = list(model_options.keys())[0]
                selected_label = st.selectbox(
                    f"Select Model:",
                    options=list(model_options.keys()),
                    index=list(model_options.keys()).index(default_model_label) if default_model_label else 0,
                    help="β = Recommended | π = Text-only (no image support)"
                )
                selected_model = model_options[selected_label]
                # Store model selection in session state for other functions.
                st.session_state.selected_model = selected_model
                # Show model info
                if selected_model in AVAILABLE_MODELS:
                    model_info = AVAILABLE_MODELS[selected_model]
                    st.info(f"π‘ {model_info['description']}")
                    # Show provider info
                    provider_info = LLM_PROVIDERS[selected_provider]
                    st.info(f"π Using {provider_info['name']}: {provider_info['description']}")
                    # Show image support status
                    if model_info.get('supports_images', False):
                        st.success("πΌοΈ This model supports both text and image analysis")
                    else:
                        st.warning("π This model supports text-only analysis (images will be ignored)")
            else:
                st.error(f"No models available for {LLM_PROVIDERS[selected_provider]['name']}")
        if st.button("π Reset Analysis"):
            st.session_state.analysis_results = None
            st.rerun()
        st.markdown("---")
        st.subheader("π Unified Workflow Info")
        if st.button("π View Workflow Steps"):
            from unified_soil_workflow import UnifiedSoilWorkflow
            workflow = UnifiedSoilWorkflow()
            workflow_info = workflow.get_workflow_visualization()
            st.markdown(workflow_info)
        st.markdown("---")
        st.subheader("π§ͺ Test with Sample Data")
        if st.button("π Load Sample Boring Log"):
            sample_text = '''SOIL BORING LOG
Project: Sample Geotechnical Investigation
Boring: BH-01
Location: Main Street, Sample City
Date: 2024-06-24
Depth: 15.0m
DEPTH (m) | SOIL DESCRIPTION | SPT-N | Su (kPa)
0.0-1.5 | Brown silty clay, soft, high plasticity | 4 | -
1.5-3.0 | Gray clay, medium stiff, wet | 8 | -
3.0-6.0 | Fine to medium sand, loose to medium dense | 12 | -
6.0-9.0 | Stiff clay, gray, low plasticity | 18 | -
9.0-12.0 | Coarse sand and gravel, dense | 35 | -
12.0-15.0 | Very stiff clay, dark gray | 30 | -
Water table encountered at 2.8m depth.
Notes: All strength values from SPT testing. Su calculated using Su=5*N for clay layers.
'''
            with st.spinner("Analyzing sample data with unified workflow..."):
                try:
                    from unified_soil_workflow import UnifiedSoilWorkflow
                    # Initialize workflow
                    workflow = UnifiedSoilWorkflow()
                    # Use selected model and current API key
                    provider, model = get_current_provider_and_model()
                    selected_model = st.session_state.get('selected_model', model)
                    current_api_key = get_api_key_for_current_provider()
                    # Run unified workflow on sample data (text-only input).
                    soil_data = workflow.analyze_soil_boring_log(
                        text_content=sample_text,
                        model=selected_model,
                        api_key=current_api_key
                    )
                    if "error" not in soil_data and "soil_layers" in soil_data:
                        # Package results for display
                        analysis_results = {
                            "soil_data": soil_data,
                            "analysis_results": {
                                "validation_stats": soil_data.get("validation_stats", {}),
                                "optimization": soil_data.get("optimization_results", {})
                            }
                        }
                        st.session_state.analysis_results = analysis_results
                        layer_count = len(soil_data["soil_layers"])
                        workflow_meta = soil_data.get("workflow_metadata", {})
                        st.success(f"β Sample analysis completed! Found {layer_count} layers using unified workflow.")
                        st.info(f"π Sample processing: {workflow_meta.get('ss_samples', 0)} SS, {workflow_meta.get('st_samples', 0)} ST samples")
                        st.rerun()
                    else:
                        st.error("β Sample analysis failed")
                        if "errors" in soil_data:
                            for error in soil_data["errors"]:
                                st.error(f" β’ {error}")
                except Exception as e:
                    st.error(f"β Sample analysis error: {str(e)}")
    # Check if provider is configured before proceeding (redundant guard in
    # case the sidebar path changes; keeps the main area gated).
    if not is_provider_configured():
        st.warning("β οΈ Please configure an API key in the sidebar to start using the application")
        return
    # Initialize components lazily so reruns reuse the same instances.
    if 'document_processor' not in st.session_state:
        st.session_state.document_processor = DocumentProcessor()
    if 'agent' not in st.session_state:
        st.session_state.agent = SoilAnalysisAgent()
    if 'visualizer' not in st.session_state:
        st.session_state.visualizer = SoilProfileVisualizer()
    if 'analysis_results' not in st.session_state:
        st.session_state.analysis_results = None
    # Main content
    if uploaded_file is not None:
        # Process document into text, preview images, and a base64 image.
        with st.spinner("Processing document..."):
            text_content, images, image_base64 = st.session_state.document_processor.process_uploaded_file(uploaded_file)
        # Display uploaded content side by side.
        col1, col2 = st.columns([1, 1])
        with col1:
            st.subheader("π Document Content")
            if text_content:
                st.text_area("Extracted Text", text_content, height=200)
            else:
                st.info("No text extracted (image-only analysis)")
        with col2:
            st.subheader("πΌοΈ Document Image")
            if images:
                st.image(images[0], caption="Soil Boring Log", use_column_width=True)
        # Analyze button: dispatch to the selected analysis method.
        if st.button("π Analyze Soil Layers", type="primary"):
            if analysis_method == "CrewAI (Two-Agent System)":
                with st.spinner("Running CrewAI two-agent geotechnical analysis..."):
                    # Show unit conversion warning
                    st.warning("β οΈ **UNIT CONVERSION ALERT**: CrewAI agents will carefully check unit conversions, especially Su values. Ensure your data uses correct units: t/mΒ² β kPa (multiply by 9.81)")
                    st.warning("π **LAYER SPLITTING ALERT**: CrewAI agents will analyze Su value consistency within layers and split layers when Su values vary by >30% or have >2x ratio")
                    try:
                        # Run CrewAI analysis workflow
                        run_crewai_analysis(
                            text_content, image_base64, merge_similar, split_thick
                        )
                    except Exception as e:
                        st.error(f"β CrewAI analysis failed: {str(e)}")
                        import traceback
                        st.error("π Full error details:")
                        st.code(traceback.format_exc())
            elif analysis_method == "LangGraph (Single Agent)":
                with st.spinner("Running LangGraph single agent analysis..."):
                    try:
                        # Run LangGraph agent analysis
                        agent_results = run_langgraph_analysis(text_content, image_base64)
                        layer_count = len(agent_results.get("soil_data", {}).get("soil_layers", []))
                        st.success(f"π LangGraph analysis completed! Found {layer_count} soil layers")
                    except Exception as e:
                        st.error(f"β LangGraph analysis failed: {str(e)}")
            else:  # Unified Workflow
                with st.spinner("Running unified soil analysis workflow..."):
                    try:
                        # Run unified workflow analysis
                        run_unified_workflow_analysis(
                            text_content, image_base64, merge_similar, split_thick
                        )
                    except Exception as e:
                        st.error(f"β Unified workflow failed: {str(e)}")
    # Display results stored by any of the analysis paths.
    if st.session_state.analysis_results:
        display_analysis_results()
def display_analysis_results():
    """Render all result tabs for the analysis stored in session state.

    Reads st.session_state.analysis_results, which every analysis path
    populates with the same two top-level keys:
        {"soil_data": {...}, "analysis_results": {...}}
    An extra "π€ CrewAI Analysis" tab is inserted before Export when CrewAI
    results are present. Errors recorded in soil_data short-circuit the
    tabs and show the raw LLM response instead.
    """
    results = st.session_state.analysis_results
    # Both the legacy agent format and the current format expose the same
    # two keys, so a single .get() per key handles either shape (the
    # original code branched on `"soil_data" in results` but both branches
    # were identical).
    soil_data = results.get("soil_data", {})
    analysis_results = results.get("analysis_results", {})
    if "error" in soil_data:
        st.error(f"Analysis Error: {soil_data['error']}")
        if "raw_response" in soil_data:
            with st.expander("Raw LLM Response"):
                st.text(soil_data["raw_response"])
        return
    # Display validation recommendations if any
    validation_recs = soil_data.get("validation_recommendations", {})
    if validation_recs:
        display_validation_recommendations(validation_recs)
    # Tabs for different views - add CrewAI tab if CrewAI results exist
    tabs = ["π Soil Profile", "π Layer Details", "π§ͺ SS/ST Processing", "π§ Optimization", "π― Nearest Neighbors", "π‘ Insights", "π Export"]
    if analysis_results.get("crewai_analysis"):
        tabs.insert(-1, "π€ CrewAI Analysis")  # Insert before Export tab
    # st.tabs returns one container per label; unpack per the tab count.
    if len(tabs) == 8:
        tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8 = st.tabs(tabs)
    else:
        tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(tabs)
    with tab1:
        display_soil_profile(soil_data)
    with tab2:
        display_layer_details(soil_data)
    with tab3:
        display_ss_st_processing(soil_data)
    with tab4:
        display_optimization_results(analysis_results)
    with tab5:
        display_nearest_neighbor_analysis(analysis_results)
    with tab6:
        display_insights(analysis_results)
    # The last one or two tabs depend on whether the CrewAI tab was added.
    if len(tabs) == 8:
        with tab7:
            display_crewai_analysis(analysis_results)
        with tab8:
            display_export_options(soil_data)
    else:
        with tab7:
            display_export_options(soil_data)
def display_soil_profile(soil_data):
    """Render profile/strength plots and project metadata for soil_data.

    Expects soil_data to carry a non-empty "soil_layers" list; warns and
    returns otherwise. Plots come from the shared visualizer instance in
    st.session_state.
    """
    st.subheader("Soil Profile Visualization")
    if "soil_layers" not in soil_data or not soil_data["soil_layers"]:
        st.warning("No soil layers found in analysis")
        return
    col1, col2 = st.columns([1, 1])
    with col1:
        # Soil profile plot
        profile_fig = st.session_state.visualizer.create_soil_profile_plot(soil_data)
        if profile_fig:
            st.plotly_chart(profile_fig, use_container_width=True)
    with col2:
        # Strength profile plot
        strength_fig = st.session_state.visualizer.create_strength_profile_plot(soil_data)
        if strength_fig:
            st.plotly_chart(strength_fig, use_container_width=True)
    # Project information metrics, three columns wide.
    if "project_info" in soil_data:
        st.subheader("Project Information")
        proj_info = soil_data["project_info"]
        info_col1, info_col2, info_col3 = st.columns(3)
        with info_col1:
            st.metric("Project", proj_info.get("project_name", "N/A"))
            st.metric("Boring ID", proj_info.get("boring_id", "N/A"))
        with info_col2:
            st.metric("Location", proj_info.get("location", "N/A"))
            st.metric("Date", proj_info.get("date", "N/A"))
        with info_col3:
            st.metric("Total Depth", f"{proj_info.get('depth_total', 0)} m")
            # Water table metric only when a depth was recorded.
            if "water_table" in soil_data and soil_data["water_table"].get("depth"):
                st.metric("Water Table", f"{soil_data['water_table']['depth']} m")
def display_layer_details(soil_data):
    """Render a summary table plus one expandable card per soil layer.

    Warns and returns when soil_data has no "soil_layers". Each card shows
    depth/thickness/type on the left and strength/moisture/consistency on
    the right, with an optional free-text description below.
    """
    st.subheader("Soil Layer Details")
    if "soil_layers" not in soil_data or not soil_data["soil_layers"]:
        st.warning("No soil layers found in analysis")
        return
    # Create summary table via the shared visualizer.
    df = st.session_state.visualizer.create_layer_summary_table(soil_data)
    if df is not None:
        st.dataframe(df, use_container_width=True)
    # Individual layer cards
    st.subheader("Layer Details")
    for i, layer in enumerate(soil_data["soil_layers"]):
        with st.expander(f"Layer {layer.get('layer_id', i+1)}: {layer.get('soil_type', 'Unknown')}"):
            col1, col2 = st.columns(2)
            with col1:
                st.write(f"**Depth:** {layer.get('depth_from', 0)} - {layer.get('depth_to', 0)} m")
                st.write(f"**Thickness:** {layer.get('depth_to', 0) - layer.get('depth_from', 0):.1f} m")
                st.write(f"**Soil Type:** {layer.get('soil_type', 'N/A')}")
                st.write(f"**Color:** {layer.get('color', 'N/A')}")
            with col2:
                st.write(f"**Strength Parameter:** {layer.get('strength_parameter', 'N/A')}")
                st.write(f"**Strength Value:** {layer.get('strength_value', 'N/A')}")
                st.write(f"**Moisture:** {layer.get('moisture', 'N/A')}")
                st.write(f"**Consistency:** {layer.get('consistency', 'N/A')}")
            if layer.get('description'):
                st.write(f"**Description:** {layer.get('description')}")
def display_optimization_results(analysis_results):
    """Render merge/split suggestions and profile statistics.

    Reads analysis_results["optimization"] for suggestion lists and
    analysis_results["validation_stats"] for the metrics row; shows an info
    message and returns when no optimization data is available.
    """
    st.subheader("Layer Optimization Suggestions")
    optimization = analysis_results.get("optimization", {})
    if not optimization:
        st.info("No optimization results available")
        return
    # Merge suggestions
    merge_suggestions = optimization.get("merge_suggestions", {}).get("suggestions", [])
    if merge_suggestions:
        st.subheader("π Merge Suggestions")
        for i, suggestion in enumerate(merge_suggestions):
            st.info(f"**Suggestion {i+1}:** {suggestion['reason']}")
            st.write(f"Layers to merge: {suggestion['layer_indices']}")
    else:
        st.success("β No merge suggestions - layers are optimally divided")
    # Split suggestions
    split_suggestions = optimization.get("split_suggestions", {}).get("suggestions", [])
    if split_suggestions:
        st.subheader("βοΈ Split Suggestions")
        for i, suggestion in enumerate(split_suggestions):
            st.warning(f"**Suggestion {i+1}:** {suggestion['reason']}")
            if "suggested_depths" in suggestion:
                st.write(f"Suggested split depths: {suggestion['suggested_depths']}")
    else:
        st.success("β No split suggestions - layer thicknesses are appropriate")
    # Statistics row: four metrics summarizing the profile.
    if "validation_stats" in analysis_results:
        st.subheader("π Profile Statistics")
        stats = analysis_results["validation_stats"]
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Total Depth", f"{stats.get('total_depth', 0):.1f} m")
        with col2:
            st.metric("Layer Count", stats.get('layer_count', 0))
        with col3:
            st.metric("Avg Thickness", f"{stats.get('average_layer_thickness', 0):.1f} m")
        with col4:
            st.metric("Thickest Layer", f"{stats.get('thickest_layer', 0):.1f} m")
def display_nearest_neighbor_analysis(analysis_results):
    """Render the nearest-neighbor layer-similarity results.

    Reads analysis_results["optimization"]["nearest_neighbor_analysis"],
    showing (in order): error/info short-circuits, analysis parameters,
    group/recommendation counts, merge recommendations, detailed similar
    groups, a full text report, and sliders for re-running with new
    parameters (the rerun button is currently informational only).
    """
    st.subheader("π― Nearest Neighbor Analysis")
    st.markdown("*Advanced layer grouping using machine learning similarity analysis*")
    optimization = analysis_results.get("optimization", {})
    nn_analysis = optimization.get("nearest_neighbor_analysis", {})
    # Short-circuit on an analysis error or an informational message.
    if "error" in nn_analysis:
        st.error(f"Analysis error: {nn_analysis['error']}")
        return
    if "message" in nn_analysis:
        st.info(nn_analysis["message"])
        return
    # Analysis parameters
    params = nn_analysis.get("analysis_parameters", {})
    st.info(f"π Analysis: {params.get('total_layers', 0)} layers, {params.get('k_neighbors', 3)} nearest neighbors, {params.get('similarity_threshold', 0.75)*100:.0f}% similarity threshold")
    # Grouping summary
    neighbor_groups = nn_analysis.get("neighbor_groups", [])
    merge_recommendations = nn_analysis.get("merge_recommendations", [])
    col1, col2 = st.columns(2)
    with col1:
        st.metric("π Similar Groups Found", len(neighbor_groups))
    with col2:
        st.metric("π Merge Recommendations", len(merge_recommendations))
    # Show merge recommendations, one expander per recommendation.
    if merge_recommendations:
        st.subheader("π― Recommended Layer Merging")
        for i, rec in enumerate(merge_recommendations):
            with st.expander(f"π Recommendation {i+1}: Merge Group {rec.get('group_id', '?')}"):
                st.write(f"**Reason:** {rec.get('reason', 'N/A')}")
                st.write(f"**Layers to merge:** {', '.join(map(str, rec.get('layer_ids', [])))}")
                st.write(f"**Depth ranges:** {', '.join(rec.get('depth_ranges', []))}")
                merged_props = rec.get('merged_properties', {})
                if merged_props:
                    st.write("**Merged layer properties:**")
                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.write(f"- Soil type: {merged_props.get('soil_type', 'N/A')}")
                        st.write(f"- Consistency: {merged_props.get('consistency', 'N/A')}")
                    with col2:
                        st.write(f"- Depth: {merged_props.get('depth_from', 0):.1f}-{merged_props.get('depth_to', 0):.1f}m")
                        st.write(f"- Thickness: {merged_props.get('thickness', 0):.1f}m")
                    with col3:
                        st.write(f"- Avg strength: {merged_props.get('avg_strength', 0):.1f}")
    # Show detailed groups, one expander per similar-layer group.
    if neighbor_groups:
        st.subheader("π Similar Layer Groups")
        for group in neighbor_groups:
            group_id = group.get('group_id', '?')
            group_size = group.get('group_size', 0)
            depth_range = group.get('depth_range', {})
            with st.expander(f"π Group {group_id} ({group_size} layers)"):
                col1, col2 = st.columns(2)
                with col1:
                    st.write("**Group Properties:**")
                    st.write(f"- Depth range: {depth_range.get('min', 0):.1f}-{depth_range.get('max', 0):.1f}m")
                    st.write(f"- Total thickness: {depth_range.get('total_thickness', 0):.1f}m")
                    st.write(f"- Layer IDs: {', '.join(map(str, group.get('layer_ids', [])))}")
                with col2:
                    st.write("**Soil Type Distribution:**")
                    soil_types = group.get('soil_types', {})
                    for soil_type, count in soil_types.items():
                        st.write(f"- {soil_type}: {count} layer(s)")
                    st.write("**Consistency Distribution:**")
                    consistencies = group.get('consistencies', {})
                    for consistency, count in consistencies.items():
                        st.write(f"- {consistency}: {count} layer(s)")
                # Strength statistics (shown only when a positive mean exists).
                # NOTE(review): placed at expander level, spanning both
                # columns -- confirm against original layout intent.
                strength_stats = group.get('strength_stats', {})
                if strength_stats.get('mean', 0) > 0:
                    st.write("**Strength Statistics:**")
                    st.write(f"- Mean: {strength_stats.get('mean', 0):.1f}")
                    st.write(f"- Range: {strength_stats.get('min', 0):.1f} - {strength_stats.get('max', 0):.1f}")
                    st.write(f"- Std Dev: {strength_stats.get('std', 0):.1f}")
    # Show detailed neighbor report
    neighbor_report = nn_analysis.get("neighbor_report", "")
    if neighbor_report:
        st.subheader("π Detailed Neighbor Analysis")
        with st.expander("π View Full Neighbor Report"):
            st.text(neighbor_report)
    # Interactive controls for a (future) parameterized re-run.
    st.subheader("βοΈ Analysis Controls")
    col1, col2 = st.columns(2)
    with col1:
        new_threshold = st.slider(
            "Similarity Threshold",
            min_value=0.5,
            max_value=0.95,
            value=params.get('similarity_threshold', 0.75),
            step=0.05,
            help="Higher values require more similarity for grouping"
        )
    with col2:
        new_k = st.slider(
            "Number of Neighbors",
            min_value=1,
            max_value=min(10, params.get('total_layers', 3)-1),
            value=params.get('k_neighbors', 3),
            help="Number of nearest neighbors to analyze"
        )
    if st.button("π Rerun Analysis with New Parameters"):
        # This would trigger a reanalysis - for now just show info.
        st.info("π‘ Reanalysis feature will be available in the feedback processing section")
def display_insights(analysis_results):
    """Display AI-generated insights and collect refinement feedback.

    Renders the LLM-generated insights (markdown, if present) and offers
    a feedback form. Submitted feedback is sent to the currently selected
    LLM provider/model to refine the extracted soil layers; on success
    ``st.session_state.analysis_results["soil_data"]`` is replaced with
    the refined data and the app reruns.

    Args:
        analysis_results: Analysis output dict; reads the "insights" key
            (markdown string).
    """
    st.subheader("π€ AI-Generated Insights")
    insights = analysis_results.get("insights", "")
    if insights:
        st.markdown(insights)
    else:
        st.info("No insights available")
    # Feedback section
    st.subheader("π¬ Provide Feedback")
    feedback = st.text_area(
        "Provide feedback to improve the analysis:",
        placeholder="e.g., 'The clay layer at 5-8m should be split into soft and stiff clay layers'"
    )
    if st.button("Submit Feedback"):
        if feedback:
            with st.spinner("Processing feedback..."):
                try:
                    from llm_client import LLMClient
                    # Use selected model and current API key from the session
                    provider, model = get_current_provider_and_model()
                    selected_model = st.session_state.get('selected_model', model)
                    current_api_key = get_api_key_for_current_provider()
                    llm_client = LLMClient(model=selected_model, api_key=current_api_key)
                    current_results = st.session_state.analysis_results
                    current_soil_data = current_results.get("soil_data", {})
                    # Refine soil layers based on feedback
                    refined_data = llm_client.refine_soil_layers(current_soil_data, feedback)
                    if "error" not in refined_data:
                        # Update with refined data
                        st.session_state.analysis_results["soil_data"] = refined_data
                        st.success("β Feedback processed! Analysis updated.")
                        st.rerun()
                    else:
                        st.error(f"β Error processing feedback: {refined_data.get('error', 'Unknown error')}")
                except Exception as e:
                    st.error(f"β Error processing feedback: {str(e)}")
        else:
            # Fix: previously an empty submission silently did nothing,
            # leaving the user without any indication why.
            st.warning("Please enter feedback before submitting.")
def display_export_options(soil_data):
    """Render export controls for the extracted soil profile.

    Offers CSV / JSON / Text downloads plus a tabular preview of the
    layer summary. CSV and text exports come from the session's
    visualizer; JSON is a direct dump of the soil data dict.

    Args:
        soil_data: Extracted soil profile dict; requires a non-empty
            "soil_layers" list, otherwise a warning is shown.
    """
    st.subheader("π Export Options")
    if "soil_layers" not in soil_data or not soil_data["soil_layers"]:
        st.warning("No data to export")
        return
    export_format = st.selectbox("Select export format:", ["CSV", "JSON", "Text"])
    if st.button("Generate Export"):
        try:
            # Build the payload and download metadata for the chosen format.
            if export_format == "CSV":
                payload = st.session_state.visualizer.export_profile_data(soil_data, "csv")
                label, file_name, mime = "π₯ Download CSV", "soil_profile.csv", "text/csv"
            elif export_format == "JSON":
                payload = json.dumps(soil_data, indent=2)
                label, file_name, mime = "π₯ Download JSON", "soil_profile.json", "application/json"
            else:  # Text
                payload = st.session_state.visualizer.export_profile_data(soil_data, "text")
                label, file_name, mime = "π₯ Download Text", "soil_profile.txt", "text/plain"
            st.download_button(
                label=label,
                data=payload,
                file_name=file_name,
                mime=mime
            )
        except Exception as e:
            st.error(f"Export failed: {str(e)}")
    # Preview export data
    with st.expander("Preview Export Data"):
        summary_df = st.session_state.visualizer.create_layer_summary_table(soil_data)
        if summary_df is not None:
            st.dataframe(summary_df)
def display_ss_st_processing(soil_data):
    """Display SS/ST sample processing details.

    Renders a multi-section report: enhanced Su value processing stats
    (averaging / subdivision / range handling), the calculator's
    processing-summary metrics, layer-by-layer processing details, and
    static reference sections on SS/ST guidelines, SI unit conversions
    and sieve #200 classification criteria.

    Args:
        soil_data: Extracted soil profile dict; reads "soil_layers" and
            "processing_summary".
    """
    st.subheader("π§ͺ Split Spoon (SS) & Shelby Tube (ST) Processing")
    if "soil_layers" not in soil_data or not soil_data["soil_layers"]:
        st.warning("No soil layers found for SS/ST analysis")
        return
    layers = soil_data["soil_layers"]
    # Enhanced Su Value Processing Summary
    st.subheader("π Enhanced Su Value Processing")
    su_processing_stats = analyze_su_processing(layers)
    if su_processing_stats['multiple_su_layers'] > 0:
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Layers with Multiple Su", su_processing_stats['multiple_su_layers'])
        with col2:
            st.metric("Su Values Averaged", su_processing_stats['averaged_layers'])
        with col3:
            st.metric("Subdivision Recommended", su_processing_stats['subdivision_recommended'])
        with col4:
            st.metric("Su Ranges Processed", su_processing_stats['range_processed'])
        # Show subdivision recommendations
        if su_processing_stats['subdivision_details']:
            st.subheader("π Layer Subdivision Recommendations")
            for detail in su_processing_stats['subdivision_details']:
                st.warning(f"**Layer {detail['layer_id']}**: {detail['reason']}")
                st.info(f" β’ Su values found: {detail['su_values']}")
                st.info(f" β’ Variation ratio: {detail['ratio']:.1f}x")
        # Show averaging results
        if su_processing_stats['averaging_details']:
            st.subheader("π Su Value Averaging Results")
            for detail in su_processing_stats['averaging_details']:
                st.success(f"**Layer {detail['layer_id']}**: {detail['description']}")
    else:
        st.info("No multiple Su values detected in layers - using single values as found")
    # Processing summary from the enhanced calculator
    processing_summary = soil_data.get("processing_summary", {})
    if processing_summary:
        st.subheader("π Processing Summary")
        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Total Layers", processing_summary.get('total_layers', 0))
            st.metric("ST Samples", processing_summary.get('st_samples', 0))
        with col2:
            st.metric("SS Samples", processing_summary.get('ss_samples', 0))
            st.metric("Clay Layers", processing_summary.get('clay_layers', 0))
        with col3:
            st.metric("Sand/Silt Layers", processing_summary.get('sand_layers', 0))
            st.metric("Su Calculated", processing_summary.get('su_calculated', 0))
        with col4:
            st.metric("Ο Calculated", processing_summary.get('phi_calculated', 0))
        # Add clay consistency check summary if available
        if processing_summary.get('clay_consistency_checks', 0) > 0:
            st.subheader("π§ͺ Clay Consistency Checks")
            col1, col2, col3 = st.columns(3)
            with col1:
                st.metric("Total Checks", processing_summary.get('clay_consistency_checks', 0))
            with col2:
                st.metric("β Consistent", processing_summary.get('consistent_clays', 0))
            with col3:
                st.metric("β οΈ Inconsistent", processing_summary.get('inconsistent_clays', 0))
    # Detailed layer processing
    st.subheader("π¬ Layer-by-Layer Processing Details")
    for i, layer in enumerate(layers):
        # Fall back to positional numbering (1-based) when layer_id is missing
        layer_id = layer.get('layer_id', i+1)
        depth_range = f"{layer.get('depth_from', 0):.1f}-{layer.get('depth_to', 0):.1f}m"
        sample_type = layer.get('sample_type', 'Unknown')
        soil_type = layer.get('soil_type', 'unknown')
        consistency = layer.get('consistency', '')
        with st.expander(f"π Layer {layer_id}: {depth_range} - {sample_type} Sample"):
            col1, col2 = st.columns(2)
            with col1:
                st.write("**Sample Information:**")
                st.write(f"- Sample Type: {sample_type}")
                st.write(f"- Soil Type: {consistency} {soil_type}")
                st.write(f"- Description: {layer.get('description', 'N/A')}")
                # Sieve analysis: >50% passing #200 is treated as fine-grained
                sieve_200 = layer.get('sieve_200_passing')
                if sieve_200 is not None:
                    st.write(f"- Sieve #200: {sieve_200}% passing")
                    if sieve_200 > 50:
                        st.success(" β Classified as fine-grained (clay/silt)")
                    else:
                        st.info(" β Classified as coarse-grained (sand/gravel)")
                else:
                    st.write("- Sieve #200: No data")
                    if soil_type == 'clay':
                        st.info(" β Assumed >50% passing (clay)")
            with col2:
                st.write("**Strength Parameters:**")
                strength_param = layer.get('strength_parameter', 'N/A')
                strength_value = layer.get('strength_value', 'N/A')
                strength_unit = layer.get('strength_unit', '')
                st.write(f"- Parameter: {strength_param}")
                st.write(f"- Value: {strength_value} {strength_unit}")
                # Processing method
                processing_method = layer.get('processing_method', 'N/A')
                st.write(f"- Processing: {processing_method}")
                # Show calculation sources (present only when computed upstream)
                if 'su_source' in layer:
                    st.info(f"π Su: {layer['su_source']}")
                if 'phi_source' in layer:
                    st.info(f"π Ο: {layer['phi_source']}")
                if 'original_spt' in layer:
                    st.info(f"π Original SPT-N: {layer['original_spt']}")
                # Unit weight if calculated
                if 'unit_weight' in layer:
                    unit_weight = layer['unit_weight']
                    unit_weight_unit = layer.get('unit_weight_unit', 'kN/mΒ³')
                    st.write(f"- Unit Weight: {unit_weight:.1f} {unit_weight_unit}")
                # Water content and consistency check for clay
                if layer.get('soil_type') == 'clay':
                    water_content = layer.get('water_content')
                    if water_content is not None:
                        st.write(f"- Water Content: {water_content}%")
                    if 'consistency_note' in layer:
                        # Notes prefixed with the pass marker render green; all others as warnings
                        if layer['consistency_note'].startswith('β '):
                            st.success(layer['consistency_note'])
                        else:
                            st.warning(layer['consistency_note'])
    # SS/ST Processing Guidelines (static reference text)
    st.subheader("π Processing Guidelines Applied")
    col1, col2 = st.columns(2)
    with col1:
        st.write("**ST (Shelby Tube) Samples:**")
        st.write("- Use Su values from unconfined compression test")
        st.write("- Undisturbed samples for accurate strength")
        st.write("- Typical for clay characterization")
        st.write("- Units converted to kPa")
    with col2:
        st.write("**SS (Split Spoon) Samples:**")
        st.write("- Use SPT-N values from penetration test")
        st.write("- Clay: Convert N to Su using Su = 5ΓN")
        st.write("- Sand: Convert N to Ο using Peck method")
        st.write("- Standard field testing method")
    # Unit conversion summary (static reference text)
    st.subheader("π Unit Conversion to SI")
    st.write("All measurements converted to SI units:")
    st.write("- **Su (Undrained Shear Strength)**: kPa")
    st.write(" - ksc (kg/cmΒ²) β kPa (multiply by 98)")
    st.write(" - t/mΒ² (tonnes/mΒ²) β kPa (multiply by 9.81)")
    st.write(" - psi β kPa (multiply by 6.89)")
    st.write(" - psf β kPa (multiply by 0.048)")
    st.write("- **Ο (Friction Angle)**: degrees")
    st.write("- **Unit Weight**: kN/mΒ³")
    st.write("- **Depth**: meters (ft β m, multiply by 0.305)")
    # Classification criteria (static reference text)
    st.subheader("π― Soil Classification Criteria")
    st.write("Sieve analysis (#200) classification:")
    st.write("- **>50% passing**: Fine-grained soil (clay/silt)")
    st.write("- **<50% passing**: Coarse-grained soil (sand/gravel)")
    st.write("- **No data available**: Assumed clay (>50% passing)")
def display_crewai_analysis(analysis_results):
    """Display CrewAI two-agent analysis results.

    Renders the outcome of the analyst/reviewer agent workflow: status
    and workflow banners, then either the multi-stage trail (initial
    analysis, review, re-investigation, final review) for
    "completed_with_revision" or the single-stage analysis/review pair,
    followed by static explanatory sections about the agents and checks.

    Args:
        analysis_results: Analysis output dict; reads "crewai_analysis"
            (with keys such as "status", "workflow", "analysis",
            "review", "initial_analysis", "final_review").
    """
    st.subheader("π€ CrewAI Two-Agent Analysis")
    st.markdown("*Advanced geotechnical analysis using specialized agents with quality control*")
    # Unit conversion warning/info
    st.info("π§ **Unit Conversion Focus**: CrewAI agents specifically check t/mΒ² β kPa conversion (Γ9.81) and other critical unit conversions")
    st.info("π **Layer Splitting Focus**: CrewAI agents analyze Su value consistency within layers and split layers when Su values vary by >30% or have >2x ratio")
    crewai_analysis = analysis_results.get("crewai_analysis", {})
    if not crewai_analysis:
        st.info("No CrewAI analysis results available")
        return
    # Analysis status
    status = crewai_analysis.get("status", "unknown")
    workflow = crewai_analysis.get("workflow", "unknown")
    col1, col2 = st.columns(2)
    with col1:
        if status == "approved":
            st.success("β Analysis Status: APPROVED")
        elif status == "completed_with_revision":
            st.warning("π Analysis Status: COMPLETED WITH REVISION")
        else:
            st.info(f"π Analysis Status: {status.upper()}")
    with col2:
        st.info(f"π Workflow: {workflow.replace('_', ' ').title()}")
    # Display results based on workflow type
    if status == "completed_with_revision":
        st.subheader("π Multi-Stage Analysis Process")
        # Initial analysis
        initial_analysis = crewai_analysis.get("initial_analysis", "")
        if initial_analysis:
            with st.expander("π Initial Geotech Engineer Analysis"):
                st.markdown(initial_analysis)
        # Initial review
        initial_review = crewai_analysis.get("initial_review", "")
        if initial_review:
            with st.expander("π΅οΈ Senior Engineer Initial Review"):
                st.markdown(initial_review)
        # Re-investigation
        reinvestigation = crewai_analysis.get("reinvestigation", "")
        if reinvestigation:
            with st.expander("π Re-investigation Based on Review"):
                st.markdown(reinvestigation)
        # Final review
        final_review = crewai_analysis.get("final_review", "")
        if final_review:
            with st.expander("β Final Senior Review & Approval"):
                st.markdown(final_review)
        st.success("π― **Quality Control Process**: The senior engineer identified issues in the initial analysis and required re-investigation, resulting in a more accurate final assessment.")
    else:
        # Single stage approval
        st.subheader("β Single-Stage Analysis Process")
        # Analysis
        analysis = crewai_analysis.get("analysis", "")
        if analysis:
            with st.expander("π Geotech Engineer Analysis"):
                st.markdown(analysis)
        # Review
        review = crewai_analysis.get("review", "")
        if review:
            with st.expander("β Senior Engineer Review & Approval"):
                st.markdown(review)
        st.success("π― **Quality Control Result**: The analysis passed senior engineer review on the first attempt - high confidence in results.")
    # Analysis insights (static explanatory text)
    st.subheader("π¬ Agent Specialization Benefits")
    col1, col2 = st.columns(2)
    with col1:
        st.write("**π¨βπΌ Geotech Engineer Agent:**")
        st.write("β’ Focuses on data extraction accuracy")
        st.write("β’ Applies standard classification methods")
        st.write("β’ Performs comprehensive parameter analysis")
        st.write("β’ Documents assumptions and methodology")
    with col2:
        st.write("**π¨βπ« Senior Geotech Reviewer Agent:**")
        st.write("β’ Validates parameter consistency")
        st.write("β’ Checks engineering reasonableness")
        st.write("β’ Identifies unusual correlations")
        st.write("β’ Ensures quality control standards")
    # Consistency checks performed (static checklist)
    st.subheader("π Consistency Checks Performed")
    st.write("The senior engineer agent automatically validates:")
    checks = [
        "**CRITICAL: Unit Conversion Accuracy** - t/mΒ² β kPa (Γ9.81), ksc β kPa (Γ98), psi β kPa (Γ6.895)",
        "**CRITICAL: Layer Splitting Analysis** - Su value consistency within layers, splitting when variation >30%",
        "Su (undrained shear strength) vs Water Content relationships",
        "SPT N-values vs Soil Consistency correlations",
        "Layer transition logic and continuity",
        "Parameter ranges within expected bounds",
        "Classification consistency across depth",
        "Verification of all conversion factors applied"
    ]
    for check in checks:
        st.write(f"β {check}")
    # Recommendations depend on whether a revision cycle occurred
    st.subheader("π‘ CrewAI Analysis Recommendations")
    if status == "completed_with_revision":
        st.info("π― **Recommendation**: Use the final revised analysis as it has undergone rigorous quality control and addresses all consistency issues identified by the senior engineer.")
        st.warning("β οΈ **Note**: Initial analysis contained inconsistencies that were corrected through the re-investigation process.")
    else:
        st.success("π― **Recommendation**: Analysis is reliable and can be used with confidence as it passed senior engineer review without requiring revision.")
    # Comparison note
    st.subheader("π Comparison with Other Methods")
    st.info("π‘ **Advantage**: CrewAI's two-agent system provides built-in quality control that single-agent approaches lack. The senior engineer agent acts as an independent validator, catching issues that might be missed in single-pass analysis.")
def analyze_su_processing(layers):
    """Summarize enhanced-Su processing flags across soil layers.

    Scans each layer dict for the flags set by the Su-processing
    pipeline and tallies how many layers were affected, collecting
    per-layer detail records for display.

    Args:
        layers: List of layer dicts; reads keys such as
            'su_processing_applied', 'su_averaged', 'su_values_found',
            'su_average_used', 'subdivision_suggested',
            'subdivision_reason', 'su_variation_ratio', 'su_range_found'.

    Returns:
        Dict with integer counters ('multiple_su_layers',
        'averaged_layers', 'subdivision_recommended', 'range_processed')
        and detail lists ('subdivision_details', 'averaging_details').
    """
    summary = {
        'multiple_su_layers': 0,
        'averaged_layers': 0,
        'subdivision_recommended': 0,
        'range_processed': 0,
        'subdivision_details': [],
        'averaging_details': [],
    }
    for entry in layers:
        lid = entry.get('layer_id', '?')
        # Layer went through the multiple-Su handling path
        if entry.get('su_processing_applied'):
            summary['multiple_su_layers'] += 1
        # Multiple Su values were collapsed into an average
        if entry.get('su_averaged'):
            summary['averaged_layers'] += 1
            found_values = entry.get('su_values_found', [])
            mean_su = entry.get('su_average_used', 0)
            summary['averaging_details'].append({
                'layer_id': lid,
                'description': f"Averaged {len(found_values)} Su values to {mean_su:.1f} kPa",
                'su_values': found_values,
            })
        # Su variation was large enough to recommend splitting the layer
        if entry.get('subdivision_suggested'):
            summary['subdivision_recommended'] += 1
            summary['subdivision_details'].append({
                'layer_id': lid,
                'reason': entry.get('subdivision_reason', 'High variation detected'),
                'su_values': entry.get('su_values_found', []),
                'ratio': entry.get('su_variation_ratio', 0),
            })
        # An Su range (e.g. "40-60") was parsed rather than a single value
        if entry.get('su_range_found'):
            summary['range_processed'] += 1
    return summary
def display_validation_recommendations(validation_recs: dict) -> None:
    """Display validation recommendations for Su-water content issues.

    Renders up to three sections depending on what the validator found:
    critical unit-conversion errors, layers needing an image recheck
    (with buttons that clear the session's analysis results and rerun
    the app), and general non-critical warnings.

    Args:
        validation_recs: Validator output dict; reads the list-valued
            keys "critical_unit_errors", "recheck_image" and
            "general_warnings".
    """
    # Critical unit errors
    critical_errors = validation_recs.get("critical_unit_errors", [])
    if critical_errors:
        st.error("π¨ CRITICAL UNIT CONVERSION ERRORS DETECTED")
        with st.expander("β οΈ Critical Issues - Action Required", expanded=True):
            st.error("The following Su values appear to be in wrong units:")
            for error in critical_errors:
                st.error(f"β’ {error}")
            st.markdown("### π§ **Recommended Actions:**")
            st.warning("1. **Check Unit Conversions Carefully:**")
            st.code("""
            t/mΒ² β kPa: multiply by 9.81
            ksc β kPa: multiply by 98.0
            psi β kPa: multiply by 6.895
            MPa β kPa: multiply by 1000
            """)
            st.warning("2. **Re-examine Original Document:**")
            st.info("β’ Look for Su unit labels in the source document")
            st.info("β’ Check if values are consistent with typical ranges")
            st.info("β’ Verify water content readings as well")
    # Image recheck needed
    recheck_needed = validation_recs.get("recheck_image", [])
    if recheck_needed:
        st.warning("π· IMAGE RECHECK RECOMMENDED")
        with st.expander("π Su-Water Content Inconsistencies", expanded=True):
            st.warning("The following layers have inconsistent Su-water content relationships:")
            for recheck in recheck_needed:
                st.warning(f"β’ {recheck}")
            st.markdown("### π **Recommended Actions:**")
            col1, col2, col3 = st.columns(3)
            with col1:
                # Each button discards the current results and reruns so the
                # user can restart the analysis from the sidebar.
                if st.button("π Reload Image", help="Upload the same image again for re-analysis"):
                    st.info("π Use the file uploader in the sidebar to reload the image")
                    st.session_state.analysis_results = None
                    st.rerun()
            with col2:
                if st.button("π· Upload Different Image", help="Try a different scan/photo of the same document"):
                    st.info("π Use the file uploader in the sidebar to try a different image")
                    st.session_state.analysis_results = None
                    st.rerun()
            with col3:
                if st.button("π€ Try Different Model", help="Use a different LLM model for analysis"):
                    st.info("π Select a different model in the sidebar and re-analyze")
                    st.session_state.analysis_results = None
                    st.rerun()
            st.markdown("### π‘ **What to Check:**")
            st.info("β’ Su values and their units (kPa, t/mΒ², ksc, psi, MPa)")
            st.info("β’ Water content percentages")
            st.info("β’ Image quality and readability")
            st.info("β’ Consistency between different test parameters")
    # General warnings (non-blocking)
    general_warnings = validation_recs.get("general_warnings", [])
    if general_warnings:
        with st.expander("β οΈ General Validation Warnings"):
            for warning in general_warnings:
                st.warning(f"β’ {warning}")
            st.info("π‘ These are minor inconsistencies that may be acceptable depending on local conditions")
# Script entry point: launch the Streamlit app via main() (defined earlier in this file).
if __name__ == "__main__":
    main()