Governance, Compliance, and Ethical Frameworks
LLMs are increasingly regulated. Organizations need policies, processes, and oversight to ensure responsible AI. This lesson teaches you to build governance programs, understand regulatory requirements, and implement ethical review processes for AI systems.
Building Prompt Governance Programs
A prompt governance program provides oversight and consistency:
from dataclasses import dataclass
from enum import Enum
from datetime import datetime
from typing import List, Optional
class RiskLevel(Enum):
    """Risk tier assigned to an AI use case, from routine to safety-critical."""

    LOW = "low"            # e.g. a simple customer FAQ bot
    MEDIUM = "medium"      # e.g. HR hiring assistance
    HIGH = "high"          # e.g. medical diagnosis support
    CRITICAL = "critical"  # e.g. autonomous weapon control
@dataclass
class PromptGovernancePolicy:
    """Organization-wide governance policy.

    Attributes:
        organization: Name of the owning organization.
        created_at: When the policy was first created.
        last_updated: When the policy was last revised.
        version: Policy version string.
        policy_areas: Per-area policy details, keyed by area name.
            Accepts ``None`` (the default) and normalizes it to a fresh
            empty dict per instance, avoiding the shared-mutable-default
            pitfall.
    """

    organization: str
    created_at: datetime
    last_updated: datetime
    version: str
    # Annotation fixed: the default is None, so the field is Optional.
    policy_areas: Optional[dict] = None  # different policies by area

    def __post_init__(self) -> None:
        # Replace the None sentinel with a fresh dict for this instance.
        if self.policy_areas is None:
            self.policy_areas = {}
class GovernanceFramework:
"""
Organizational framework for governing AI/LLM use.
"""
def __init__(self, organization_name: str):
self.organization = organization_name
self.policies = []
self.review_board = []
self.audit_trail = []
def create_use_case_policy(self,
use_case_name: str,
risk_level: RiskLevel,
requirements: List[str]) -> dict:
"""
Create governance policy for a specific use case.
"""
policy = {
"use_case": use_case_name,
"risk_level": risk_level.value,
"requirements": requirements,
"created_at": datetime.now().isoformat(),
"status": "draft"
}
# Different requirements based on risk level
if risk_level == RiskLevel.LOW:
policy["requirements"].extend([
"Basic prompt review",
"Output spot-checking",
"Annual audit"
])
elif risk_level == RiskLevel.MEDIUM:
policy["requirements"].extend([
"Two-person review of prompts",
"Bias testing required",
"User feedback mechanism",
"Quarterly audits",
"Incident logging"
])
elif risk_level == RiskLevel.HIGH:
policy["requirements"].extend([
"Expert domain review",
"Comprehensive bias/fairness testing",
"Human-in-the-loop for high-impact decisions",
"Monthly security audits",
"Compliance review",
"Risk assessment documentation",
"User consent/disclosure"
])
elif risk_level == RiskLevel.CRITICAL:
policy["requirements"].extend([
"Executive board approval",
"External expert review",
"Continuous monitoring",
"Audit trail of all decisions",
"Regulatory consultation",
"Incident response plan",
"Legal review",
"Public disclosure if applicable"
])
self.policies.append(policy)
return policy
def assign_reviewer(self,
use_case: str,
reviewer_name: str,
expertise: List[str]):
"""Assign reviewers to use cases."""
self.review_board.append({
"use_case": use_case,
"reviewer": reviewer_name,
"expertise": expertise,
"assigned_at": datetime.now().isoformat()
})
def log_prompt_review(self,
use_case: str,
prompt_id: str,
reviewer: str,
approved: bool,
concerns: List[str] = None):
"""Log prompt review decisions."""
entry = {
"timestamp": datetime.now().isoformat(),
"use_case": use_case,
"prompt_id": prompt_id,
"reviewer": reviewer,
"approved": approved,
"concerns": concerns or []
}
self.audit_trail.append(entry)
if not approved:
print(f"PROMPT REJECTED: {prompt_id}")
print(f"Concerns: {concerns}")
return False
return True
def generate_governance_report(self) -> dict:
"""Generate governance compliance report."""
return {
"organization": self.organization,
"use_cases": len(self.policies),
"reviewers": len(self.review_board),
"total_reviews": len(self.audit_trail),
"approval_rate": self._calculate_approval_rate(),
"policies": self.policies,
"audit_trail_count": len(self.audit_trail)
}
def _calculate_approval_rate(self) -> float:
if not self.audit_trail:
return 0
approved = sum(1 for entry in self.audit_trail if entry["approved"])
return approved / len(self.audit_trail)
# Demo: stand up a governance framework and exercise it end to end.
framework = GovernanceFramework("TechCorp")

# Register one low-risk and one high-risk use case.
framework.create_use_case_policy(
    "Customer Support Chatbot",
    RiskLevel.LOW,
    ["Should not collect sensitive customer data"],
)
framework.create_use_case_policy(
    "Hiring Assistant",
    RiskLevel.HIGH,
    ["Must test for hiring discrimination", "Human review required"],
)

# Record an approved prompt review, then print the compliance summary.
framework.log_prompt_review("Customer Support Chatbot", "prompt_001", "Alice", True)
print(framework.generate_governance_report())
Regulatory Landscape
Different regulations apply in different contexts:
from typing import Dict, List
class Regulation:
    """Definition of a regulatory requirement.

    Attributes:
        name: Official name of the regulation.
        jurisdiction: Where the regulation applies.
        requirements: Concrete obligations the regulation imposes.
    """

    def __init__(self, name: str, jurisdiction: str, requirements: List[str]):
        self.name = name
        self.jurisdiction = jurisdiction
        self.requirements = requirements

    def __repr__(self) -> str:
        # Debug-friendly representation; requirements omitted for brevity.
        return f"Regulation(name={self.name!r}, jurisdiction={self.jurisdiction!r})"


class RegulatoryFramework:
    """
    Track regulatory requirements across jurisdictions.
    """

    # Illustrative, non-exhaustive catalog of major AI-relevant regulations.
    REGULATIONS = {
        "EU_AI_Act": Regulation(
            name="EU AI Act",
            jurisdiction="European Union",
            requirements=[
                "Classify AI systems by risk level",
                "Document high-risk AI systems",
                "Implement quality and risk management systems",
                "Provide transparency and information to users",
                "Ensure human oversight",
                "Maintain records of AI activities",
                "Allow for intervention in critical situations"
            ]
        ),
        "NIST_AI_RMF": Regulation(
            name="NIST AI Risk Management Framework",
            jurisdiction="United States",
            requirements=[
                "Map AI risk to business context",
                "Measure and manage AI risks",
                "Implement risk mitigation strategies",
                "Monitor AI system performance",
                "Plan for ongoing updates and monitoring"
            ]
        ),
        "GDPR": Regulation(
            name="General Data Protection Regulation",
            jurisdiction="European Union",
            requirements=[
                "Get informed consent before processing personal data",
                "Implement data minimization (collect only necessary data)",
                "Ensure right to explanation for automated decisions",
                "Implement data deletion mechanisms",
                "Conduct data protection impact assessments",
                "Appoint data protection officer if processing at scale"
            ]
        ),
        "CCPA": Regulation(
            name="California Consumer Privacy Act",
            jurisdiction="California, USA",
            requirements=[
                "Disclose data collection practices",
                "Allow consumers to access their data",
                "Allow consumers to delete their data",
                "Allow consumers to opt-out of data sharing",
                "Implement reasonable security measures",
                "Avoid discrimination for exercising rights"
            ]
        ),
        "HIPAA": Regulation(
            name="Health Insurance Portability and Accountability Act",
            jurisdiction="United States (Healthcare)",
            requirements=[
                "Protect patient privacy",
                "Limit use of patient data",
                "Implement access controls",
                "Maintain audit logs",
                "Ensure data security",
                "Report data breaches"
            ]
        ),
        "FINRA": Regulation(
            name="Financial Industry Regulatory Authority",
            jurisdiction="United States (Finance)",
            requirements=[
                "Suitability: Recommendations appropriate for customer",
                "Fair dealing: Honest and fair treatment",
                "Disclosure: Reveal conflicts of interest",
                "Best execution: Get best possible price",
                "Maintain customer communication records"
            ]
        )
    }

    @staticmethod
    def get_applicable_regulations(jurisdiction: str,
                                   industry: str) -> List[Regulation]:
        """Get regulations applicable to a jurisdiction/industry.

        Matching is case-insensitive substring matching against each
        regulation's jurisdiction and name (a real implementation would
        use structured jurisdiction/industry codes). Each regulation
        appears at most once, even if both criteria match.
        """
        # Lowercase the query terms once: the original compared the raw
        # (possibly upper-case) input against lowered text, so "EU" never
        # matched "European Union".
        jurisdiction_key = jurisdiction.lower()
        industry_key = industry.lower()
        applicable = []
        for reg in RegulatoryFramework.REGULATIONS.values():
            # Single combined condition also prevents the duplicate append
            # the original produced when both checks matched.
            if (jurisdiction_key in reg.jurisdiction.lower()
                    or industry_key in reg.name.lower()):
                applicable.append(reg)
        return applicable

    @staticmethod
    def build_compliance_checklist(regulations: List[Regulation]) -> dict:
        """Build a checklist of compliance requirements.

        Returns:
            Mapping of regulation name to its jurisdiction and a
            requirement -> done flag map (all initially False).
        """
        checklist = {}
        for regulation in regulations:
            checklist[regulation.name] = {
                "jurisdiction": regulation.jurisdiction,
                "requirements": {req: False for req in regulation.requirements}
            }
        return checklist
# Demo: look up the regulations relevant to EU healthcare and list them.
applicable = RegulatoryFramework.get_applicable_regulations("EU", "Healthcare")

print("Applicable regulations:")
for reg in applicable:
    print(f"- {reg.name}: {reg.jurisdiction}")

checklist = RegulatoryFramework.build_compliance_checklist(applicable)
Transparency and Explainability
Users deserve to understand how AI affects them:
class TransparencyFramework:
    """
    Ensure transparency in AI/LLM systems.

    Provides static helpers to (1) generate a user-facing disclosure of a
    system's capabilities, limitations, and risks, and (2) document an
    individual decision to support a right to explanation.
    """
    @staticmethod
    def create_ai_disclosure(system_name: str,
                             capabilities: List[str],
                             limitations: List[str],
                             risks: List[str]) -> str:
        """Create user-facing disclosure about AI system.

        Args:
            system_name: Display name shown in the disclosure header.
            capabilities: What the system can do (also summarized inline).
            limitations: What the system cannot do.
            risks: Known risks and limitations to disclose.

        Returns:
            The disclosure as a plain-text string.
        """
        # chr(10) is "\n": backslashes are not allowed inside f-string
        # expressions before Python 3.12, hence the chr(10).join() idiom.
        # NOTE(review): template reconstructed as left-aligned so the
        # printed text carries no stray indentation — confirm against the
        # intended rendering.
        disclosure = f"""
TRANSPARENCY DISCLOSURE: {system_name}
What is this system?
This system uses artificial intelligence to assist with {', '.join(capabilities)}.
How it works:
- The system analyzes your input and generates responses
- It learns patterns from training data
- It may sometimes make mistakes or produce biased outputs
What it can do well:
{chr(10).join(f'- {cap}' for cap in capabilities)}
What it cannot do:
{chr(10).join(f'- {lim}' for lim in limitations)}
Risks and limitations:
{chr(10).join(f'- {risk}' for risk in risks)}
When it needs human oversight:
- High-stakes decisions affecting your rights or welfare
- Medical, legal, or financial advice
- Situations where accuracy is critical
Your rights:
- You can ask for explanations of decisions
- You can request human review
- You can request your data be deleted (subject to legal requirements)
- You can report problems or concerns
Questions?
Contact: [contact information]
"""
        return disclosure

    @staticmethod
    def create_explainability_document(decision: str,
                                       input_data: str,
                                       reasoning: str,
                                       confidence: float) -> dict:
        """
        Document why the AI made a specific decision.
        Supports right to explanation.

        Args:
            decision: The decision that was made.
            input_data: Raw input; only the first 200 characters are kept.
            reasoning: Free-text explanation of the decision.
            confidence: Model confidence in [0, 1] — assumed range, based
                on the 0.8 threshold below; confirm with callers.
        """
        return {
            "decision": decision,
            # Truncate to keep the document small (and limit PII retention).
            "input_summary": input_data[:200],
            "reasoning": reasoning,
            "confidence": confidence,
            # Fixed illustrative list; a real system would derive these.
            "factors_considered": [
                "Input quality",
                "Training data relevance",
                "Pattern matching"
            ],
            "uncertainty": 1 - confidence,
            # Below 0.8 confidence, flag the decision for human review.
            "recommendation": "Human review recommended" if confidence < 0.8 else "Low risk decision"
        }
# Demo: publish a transparency disclosure for a hiring screener.
disclosure = TransparencyFramework.create_ai_disclosure(
    "Job Application Screener",
    capabilities=["Screen resumes", "Identify qualified candidates"],
    limitations=["Cannot assess culture fit", "Cannot judge intangibles"],
    risks=["May have hiring bias", "Cannot fully assess potential"],
)
print(disclosure)
Privacy Considerations
LLM systems handle sensitive data. Protect it:
class PrivacyPolicy:
    """Privacy policy for LLM systems.

    Classifies data-type sensitivity, tracks retention rules per data
    type, and scans free text for common PII patterns.
    """

    def __init__(self, system_name: str):
        self.system = system_name
        self.pii_handling: dict = {}  # data_type -> retention policy dict

    def classify_data_sensitivity(self, data_type: str) -> str:
        """Classify the sensitivity of a data type into a protection tier.

        Returns one of four tier strings, from strictest ("PII") down to
        "General" for unrecognized types.
        """
        pii = ["name", "email", "phone", "address", "ssn", "credit_card"]
        sensitive = ["health", "financial", "legal", "behavioral"]
        moderate = ["age", "education", "location"]
        if data_type in pii:
            return "PII - Strictly confidential"
        elif data_type in sensitive:
            return "Sensitive - Requires protection"
        elif data_type in moderate:
            # Previously dead data: the `moderate` list was defined but
            # never checked, so these types fell through to "General".
            return "Moderate - Limited protection"
        else:
            return "General - Standard protection"

    def set_data_retention_policy(self,
                                  data_type: str,
                                  retention_days: int) -> None:
        """Set how long data of the given type may be kept before deletion."""
        self.pii_handling[data_type] = {
            "retention_days": retention_days,
            "deletion_required_after": retention_days,
            "policy": f"Delete after {retention_days} days"
        }

    def check_pii_exposure(self, text: str) -> dict:
        """Detect whether text contains common PII patterns.

        Note: regex detection is heuristic — the phone pattern in
        particular is broad and can over-match long digit runs.

        Returns:
            Dict with ``contains_pii`` flag, per-type match counts in
            ``pii_found``, and a coarse ``risk_level``.
        """
        import re  # local import keeps the dependency with its only user
        pii_patterns = {
            # TLD class fixed to [A-Za-z]; the original's [A-Z|a-z] also
            # matched a literal '|' character.
            "email": r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b',
            "phone": r'\+?1?\d{9,15}',
            "ssn": r'\d{3}-\d{2}-\d{4}',
            "credit_card": r'\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}'
        }
        found_pii = {}
        for pii_type, pattern in pii_patterns.items():
            matches = re.findall(pattern, text)
            if matches:
                found_pii[pii_type] = len(matches)
        return {
            "contains_pii": len(found_pii) > 0,
            "pii_found": found_pii,
            "risk_level": "critical" if found_pii else "none"
        }
# Demo: configure retention policies, then scan a string for PII leakage.
privacy = PrivacyPolicy("Customer Analytics System")
for data_type, days in (("email", 30), ("name", 90)):
    privacy.set_data_retention_policy(data_type, retention_days=days)

# Deliberately leaky sample: contains a name, an email, and a phone number.
text = "John Smith (john@example.com, 555-123-4567) applied for a job"
exposure = privacy.check_pii_exposure(text)
print("PII exposure:", exposure)
Building an Ethical Review Process
Systematic review of AI decisions:
class EthicalReviewProcess:
    """
    Structured process for ethical review of AI systems.

    Runs a fixed checklist across five review categories and records the
    result; systems can later be approved (optionally with conditions).
    """

    # Question bank applied to every review, grouped by category.
    REVIEW_CHECKLIST = {
        "Impact Assessment": [
            "Does this system affect people's rights or welfare?",
            "Who are the stakeholders?",
            "What are potential harms?",
            "What are potential benefits?"
        ],
        "Fairness Review": [
            "Could this system discriminate against any group?",
            "Has bias been tested?",
            "Are outcomes equitable?",
            "Is human oversight included?"
        ],
        "Transparency Review": [
            "Can users understand how decisions are made?",
            "Is disclosure adequate?",
            "Can users appeal decisions?",
            "Is the system explainable?"
        ],
        "Privacy Review": [
            "Is personal data minimized?",
            "Is consent obtained?",
            "Are data retention policies clear?",
            "Is data secure?"
        ],
        "Accountability Review": [
            "Is there human oversight?",
            "Are errors tracked and reported?",
            "Is there a process to address harms?",
            "Is governance in place?"
        ]
    }

    def __init__(self):
        self.reviews: List[dict] = []  # all reviews conducted so far

    def conduct_ethical_review(self,
                               system_name: str,
                               system_description: str) -> dict:
        """Conduct a comprehensive ethical review of a system.

        Every checklist question starts as "needs_clarification"; a human
        reviewer is expected to fill in real answers afterwards.

        Returns:
            The review record (also retained in ``self.reviews``).
        """
        review_result = {
            "system": system_name,
            # Previously accepted but silently dropped — now recorded so
            # the review is self-describing.
            "description": system_description,
            "timestamp": datetime.now().isoformat(),
            "checklist_results": {},
            "concerns": [],
            "recommendations": [],
            "approval_status": "pending"
        }
        for category, questions in self.REVIEW_CHECKLIST.items():
            results = {q: "needs_clarification" for q in questions}
            review_result["checklist_results"][category] = results
        self.reviews.append(review_result)
        return review_result

    def approve_system(self,
                       system_name: str,
                       conditions: Optional[List[str]] = None) -> dict:
        """Approve a previously reviewed system, optionally with conditions.

        Returns:
            The updated review record, or an error dict if the system was
            never reviewed.
        """
        for review in self.reviews:
            if review["system"] == system_name:
                review["approval_status"] = "approved_with_conditions" if conditions else "approved"
                review["conditions"] = conditions or []
                return review
        return {"error": "System not found"}
# Demo: run an ethical review, print its checklist, then approve with conditions.
reviewer = EthicalReviewProcess()
review = reviewer.conduct_ethical_review(
    "Job Screening Bot",
    "Screens resumes to identify qualified candidates",
)

print("Ethical review checklist:")
for category, items in review["checklist_results"].items():
    print(f"\n{category}:")
    for item in items:
        print(f" - {item}")

approval = reviewer.approve_system(
    "Job Screening Bot",
    conditions=[
        "Must test for hiring bias",
        "Human review required for borderline candidates",
        "Track hiring outcomes to detect discrimination",
    ],
)
print("\nApproval status:", approval["approval_status"])
Key Takeaway: Governance, compliance, and ethics aren’t obstacles—they’re essential infrastructure for trustworthy, long-term AI deployment. Build systems with oversight, transparency, and accountability from the start.
Exercise: Create a Prompt Governance Checklist for Your Organization
Build a comprehensive governance checklist that covers:
- Risk assessment framework
- Regulatory compliance
- Fairness and bias testing
- Security requirements
- Ethical review process
Requirements:
- Classify use cases by risk level
- Map applicable regulations
- Create compliance checklists
- Build ethical review framework
- Generate governance report template
Starter code:
class OrganizationGovernanceProgram:
    """Organization-wide prompt governance program.

    Exercise starter: composes the governance, regulatory, and ethical
    review components built earlier in this lesson. The TODOs mark the
    parts the reader is expected to implement.
    """
    def __init__(self, organization_name: str):
        self.organization = organization_name
        # One instance of each framework from earlier in the lesson.
        self.framework = GovernanceFramework(organization_name)
        self.regulatory = RegulatoryFramework()
        self.ethical_reviewer = EthicalReviewProcess()

    def create_governance_checklist(self, use_case: str, risk_level: RiskLevel) -> dict:
        """
        Create comprehensive governance checklist for a use case.

        Returns:
            Checklist with all governance requirements (returns None
            until the TODOs below are implemented).
        """
        # TODO: Build use case policy
        # TODO: Get applicable regulations
        # TODO: Create compliance checklist
        # TODO: Create ethical review
        # TODO: Return comprehensive checklist
        pass

    def approve_use_case(self, use_case: str) -> dict:
        """
        Go/no-go decision for deploying a use case.
        """
        # TODO: Gather all requirements
        # TODO: Check compliance
        # TODO: Verify ethical review
        # TODO: Return approval decision
        pass

# Example invocation of the starter program; `checklist` is None until
# create_governance_checklist is implemented.
program = OrganizationGovernanceProgram("MyCompany")
checklist = program.create_governance_checklist(
    use_case="Customer Support AI",
    risk_level=RiskLevel.MEDIUM
)
Extension challenges:
- Build automated compliance checking
- Create governance dashboard for executives
- Implement audit trail system
- Build incident response procedures
- Create training program for teams using AI
By completing this exercise, you’ll establish a governance foundation for responsible AI deployment in your organization.