AI Governance Frameworks

Advanced · Lesson 3 of 4 · Estimated time: 50 min


AI governance ensures responsible development and deployment of AI systems through regulatory compliance, ethical guidelines, and organizational policies. This lesson works through three building blocks: the EU AI Act, the NIST AI Risk Management Framework, and organizational responsible AI principles.

EU AI Act Compliance

The EU AI Act classifies AI systems into four risk tiers: minimal, limited, high, and unacceptable (prohibited), with escalating safeguards at each level. Below is a minimal classification sketch; the criteria lists are simplified illustrations, not the Act's legal definitions.

from dataclasses import dataclass
from enum import Enum
from typing import List

class RiskLevel(Enum):
    """Risk tiers following the EU AI Act's four categories."""
    MINIMAL = "minimal"
    LIMITED = "limited"        # transparency obligations (e.g. chatbots)
    HIGH = "high"
    PROHIBITED = "prohibited"  # "unacceptable risk" in the Act's terms

@dataclass
class AISystem:
    """AI system subject to governance."""
    system_id: str
    name: str
    risk_level: RiskLevel
    model: str
    intended_use: str
    training_data: str
    deployed_regions: List[str]

class EUAIActCompliance:
    """Ensure compliance with EU AI Act."""

    RISK_ASSESSMENT_CRITERIA = {
        "high_risk": [
            "critical_infrastructure",
            "biometric_identification",
            "law_enforcement",
            "education_employment",
            "credit_assessment"
        ],
        "low_risk": [
            "chatbot",
            "content_recommendation",
            "code_generation"
        ],
        "prohibited": [
            "social_credit",
            "real_time_biometric_mass_surveillance",
            "emotional_recognition"
        ]
    }

    def classify_system(self, system: AISystem) -> RiskLevel:
        """Classify AI system by risk."""
        if system.intended_use in self.RISK_ASSESSMENT_CRITERIA["prohibited"]:
            return RiskLevel.PROHIBITED

        if system.intended_use in self.RISK_ASSESSMENT_CRITERIA["high_risk"]:
            return RiskLevel.HIGH

        if system.intended_use in self.RISK_ASSESSMENT_CRITERIA["limited_risk"]:
            return RiskLevel.LIMITED

        return RiskLevel.MINIMAL

    def get_required_controls(self, risk_level: RiskLevel) -> List[str]:
        """Get required controls by risk level."""
        controls = {
            RiskLevel.PROHIBITED: ["blocked_entirely"],
            RiskLevel.HIGH: [
                "conformity_assessment",
                "impact_assessment",
                "documentation",
                "human_oversight",
                "logging",
                "monitoring"
            ],
            RiskLevel.LIMITED: [
                "transparency",
                "documentation"
            ],
            RiskLevel.MINIMAL: []
        }

        return controls.get(risk_level, [])

    def generate_compliance_report(self, system: AISystem) -> dict:
        """Generate EU AI Act compliance report."""
        risk_level = self.classify_system(system)
        required_controls = self.get_required_controls(risk_level)

        return {
            "system_id": system.system_id,
            "risk_classification": risk_level.value,
            "required_controls": required_controls,
            "compliant": risk_level != RiskLevel.PROHIBITED,
            "recommended_actions": self._recommend_actions(risk_level)
        }

    def _recommend_actions(self, risk_level: RiskLevel) -> List[str]:
        """Recommend actions for compliance."""
        if risk_level == RiskLevel.PROHIBITED:
            return ["Remove system from EU operation"]

        if risk_level == RiskLevel.HIGH:
            return [
                "Conduct impact assessment",
                "Implement human oversight",
                "Document training data",
                "Set up monitoring dashboard"
            ]

        return ["Maintain transparency documentation"]

NIST AI Risk Management Framework

The NIST AI Risk Management Framework (AI RMF) structures risk work around four core functions: Govern, Map, Measure, and Manage. The sketch below covers the Map (identify risks) and Manage (mitigate) steps with deliberately simplified heuristics.

from datetime import datetime
from typing import Dict, List

class NISTRiskFramework:
    """Implement NIST AI Risk Management Framework."""

    RISK_CATEGORIES = {
        "performance": "Model accuracy and reliability issues",
        "security": "Adversarial attacks and model theft",
        "fairness": "Bias and discrimination",
        "transparency": "Explainability and interpretability",
        "accountability": "Responsibility and audit trails"
    }

    def __init__(self):
        self.risk_assessments: Dict[str, dict] = {}

    def assess_risks(
        self,
        system_id: str,
        model_name: str,
        training_data_size: int,
        deployed_contexts: List[str]
    ) -> dict:
        """Conduct NIST AI risk assessment."""
        risks = {}

        # Performance risks
        if training_data_size < 1000:
            risks["performance"] = {
                "level": "high",
                "description": "Small training data may cause generalization issues"
            }

        # Security risks
        if "public_api" in deployed_contexts:
            risks["security"] = {
                "level": "high",
                "description": "Public exposure increases attack surface"
            }

        # Fairness risks (requires data analysis)
        risks["fairness"] = {
            "level": "medium",
            "description": "Monitor for bias across demographic groups"
        }

        self.risk_assessments[system_id] = {
            "model": model_name,
            "assessed_at": datetime.now(),
            "risks": risks
        }

        return risks

    def mitigate_risk(
        self,
        risk_category: str,
        mitigation_strategy: str
    ) -> dict:
        """Document risk mitigation."""
        mitigations = {
            "performance": [
                "Increase training data",
                "Implement ensemble methods",
                "Add monitoring dashboard"
            ],
            "security": [
                "Implement rate limiting",
                "Add adversarial input detection",
                "Encrypt model parameters"
            ],
            "fairness": [
                "Analyze by demographic group",
                "Balance training data",
                "Implement fairness metrics"
            ],
            "transparency": [
                "Add model cards",
                "Implement LIME explanations",
                "Document decision logic"
            ]
        }

        return {
            "category": risk_category,
            "strategies": mitigations.get(risk_category, []),
            "selected": mitigation_strategy
        }
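
Usage, again with hypothetical system details chosen to trip the heuristics above:

framework = NISTRiskFramework()

risks = framework.assess_risks(
    system_id="sys-002",
    model_name="support-chatbot-v2",      # hypothetical
    training_data_size=500,               # triggers the performance risk
    deployed_contexts=["public_api"],     # triggers the security risk
)

for category, detail in risks.items():
    print(f"{category}: {detail['level']} - {detail['description']}")

plan = framework.mitigate_risk("security", "Implement rate limiting")
print(plan["strategies"])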

Responsible AI Principles

Codify organizational responsible AI principles and apply them across the development lifecycle; model cards and human-oversight policies are two common artifacts.

from dataclasses import dataclass
from typing import List

@dataclass
class ResponsibleAIPrinciples:
    """Responsible AI commitments."""
    transparency: bool
    accountability: bool
    fairness: bool
    privacy: bool
    safety: bool

class ResponsibleAIImplementation:
    """Implement responsible AI practices."""

    def __init__(self):
        self.principles = ResponsibleAIPrinciples(
            transparency=True,
            accountability=True,
            fairness=True,
            privacy=True,
            safety=True
        )
        self.implementation_checklist = {}

    def create_model_card(
        self,
        model_name: str,
        intended_use: str,
        performance_metrics: dict,
        limitations: List[str],
        bias_analysis: dict
    ) -> dict:
        """Create model card documenting transparency."""
        return {
            "model_name": model_name,
            "intended_use": intended_use,
            "performance_metrics": performance_metrics,
            "limitations": limitations,
            "bias_analysis": bias_analysis,
            "recommendations": self._generate_recommendations(limitations)
        }

    def _generate_recommendations(self, limitations: List[str]) -> List[str]:
        """Generate recommendations based on limitations."""
        recommendations = []

        if "low_accuracy_minority_groups" in limitations:
            recommendations.append("Collect more minority group training data")

        if "high_latency" in limitations:
            recommendations.append("Implement model compression or distillation")

        if "adversarial_vulnerability" in limitations:
            recommendations.append("Add adversarial training")

        return recommendations

    def implement_human_oversight(
        self,
        high_stakes_decisions: bool,
        flagged_cases_percentage: float = 5.0
    ) -> dict:
        """Implement human oversight for critical decisions."""
        return {
            "human_review_required": high_stakes_decisions,
            "review_percentage": flagged_cases_percentage,
            "escalation_criteria": [
                "Low confidence predictions",
                "Outlier inputs",
                "Fairness concerns"
            ]
        }
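
A model card sketch; the model name, metric values, and limitation tags are illustrative:

rai = ResponsibleAIImplementation()

card = rai.create_model_card(
    model_name="loan-default-model",      # hypothetical
    intended_use="credit_assessment",
    performance_metrics={"auc": 0.87, "f1": 0.79},
    limitations=["low_accuracy_minority_groups", "high_latency"],
    bias_analysis={"demographic_parity_gap": 0.08},
)
print(card["recommendations"])
# ['Collect more minority group training data',
#  'Implement model compression or distillation']

oversight = rai.implement_human_oversight(high_stakes_decisions=True)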

Monitoring and Compliance Dashboards

Track compliance metrics continuously and raise alerts when thresholds are breached.

from datetime import datetime
from typing import Optional

class ComplianceDashboard:
    """Monitor AI governance compliance."""

    def __init__(self):
        self.metrics = {}
        self.alerts = []

    def track_metric(
        self,
        metric_name: str,
        value: float,
        threshold: Optional[float] = None
    ):
        """Track compliance metric."""
        self.metrics[metric_name] = {
            "value": value,
            "timestamp": datetime.now(),
            "threshold": threshold,
            "compliant": value <= threshold if threshold else True
        }

        if threshold is not None and value > threshold:
            self.alerts.append({
                "metric": metric_name,
                "value": value,
                "threshold": threshold,
                "severity": "warning"
            })

    def get_compliance_status(self) -> dict:
        """Get overall compliance status."""
        compliant_metrics = sum(
            1 for m in self.metrics.values() if m["compliant"]
        )
        total_metrics = len(self.metrics)

        return {
            "compliance_percentage": (compliant_metrics / total_metrics * 100)
            if total_metrics > 0 else 0,
            "total_metrics": total_metrics,
            "compliant_metrics": compliant_metrics,
            "alerts": self.alerts,
            "status": "compliant" if len(self.alerts) == 0 else "non_compliant"
        }
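
Wiring it together; the metric names and thresholds here are examples, not values prescribed by any framework:

dashboard = ComplianceDashboard()

# Hypothetical metrics where lower is better, so thresholds are upper bounds
dashboard.track_metric("fairness_gap", value=0.04, threshold=0.10)
dashboard.track_metric("pii_leak_rate", value=0.02, threshold=0.01)  # breach

status = dashboard.get_compliance_status()
print(status["status"])                  # "non_compliant"
print(status["compliance_percentage"])   # 50.0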

Key Takeaway

AI governance requires layered compliance: classify systems under the EU AI Act, assess and mitigate risks with the NIST AI RMF, and embed responsible AI principles across development. Document systems with model cards, require human oversight for high-stakes decisions, and monitor compliance continuously.

Exercises

  1. Classify systems by EU AI Act risk levels
  2. Conduct NIST risk assessment on 3 systems
  3. Create model cards with transparency info
  4. Implement human oversight approval flow
  5. Build compliance dashboard with metrics
  6. Generate governance compliance reports
  7. Document responsible AI principles adoption