<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="stratml_AI_Highlight.xsl"?>
<StrategicPlan xmlns="urn:ISO:std:iso:17469:tech:xsd:stratml_core" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
  <Name>Concrete Projects for Preparing the Transition to Advanced Artificial Intelligence</Name>
  <Description>Identify and organize concrete projects that can improve societal preparedness for increasingly capable artificial intelligence systems and support coordinated, responsible action among stakeholders.</Description>
  <OtherInformation>Submitter&apos;s Note: This StratML rendition was compiled from the source by ChatGPT and lightly edited in the form at https://stratml.us/forms/Claude/Part1.html</OtherInformation>
  <StrategicPlanCore>
    <Organization>
      <Name>No One in Particular</Name>
      <Acronym>NOIP</Acronym>
      <Identifier>15c5922a-590e-4036-a7df-56d0e309c3e5</Identifier>
      <Description>A hypothetical organization representing a coalition of stakeholders who may choose to undertake the projects described in this plan.</Description>
      <Stakeholder StakeholderTypeType="Person">
        <Name>William MacAskill</Name>
        <Description>Co-author of the listing of potential projects to prepare for advanced artificial intelligence.</Description>
      </Stakeholder>
      <Stakeholder StakeholderTypeType="Person">
        <Name>Fin Moorhouse</Name>
        <Description>Co-author of the listing of potential projects to prepare for advanced artificial intelligence.</Description>
      </Stakeholder>
    </Organization>
    <Vision>
      <Description>Artificial intelligence systems operate safely, transparently, and in alignment with human values.</Description>
      <Identifier>68febb5b-ce5d-4223-9fa1-cbd7a7c41c32</Identifier>
    </Vision>
    <Mission>
      <Description>To develop and coordinate practical projects that strengthen governance, evaluation, cooperation, and oversight capabilities for advanced artificial intelligence.</Description>
      <Identifier>f00415ca-6bf4-4d77-9adf-766f7d0a7992</Identifier>
    </Mission>
    <Value>
      <Name>Preparedness</Name>
      <Description>Anticipate emerging technological risks and opportunities and prepare institutions and tools accordingly.</Description>
    </Value>
    <Value>
      <Name>Transparency</Name>
      <Description>Promote open evaluation and disclosure of system capabilities and risks.</Description>
    </Value>
    <Value>
      <Name>Coordination</Name>
      <Description>Enable collective action among stakeholders to address shared challenges.</Description>
    </Value>
    <Value>
      <Name>Accountability</Name>
      <Description>Ensure that actions and outcomes can be evaluated against clear standards and expectations.</Description>
    </Value>
    <Goal>
      <Name>Evaluation</Name>
      <Description>Establish mechanisms to assess the behavior, reliability, and strategic reasoning capabilities of artificial intelligence systems.</Description>
      <Identifier>4aec9bfd-2c71-4aa8-9f7d-aac36eabce7e</Identifier>
      <SequenceIndicator>1</SequenceIndicator>
      <Objective>
        <Name>Character Evaluation</Name>
        <Description>Develop independent methods to evaluate the behavioral traits and decision-making patterns of artificial intelligence systems.</Description>
        <Identifier>b68d2402-a474-499e-abfa-fa8c150bd3ed</Identifier>
        <SequenceIndicator>1.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Reasoning Benchmarking</Name>
        <Description>Create standardized benchmarks to assess strategic and philosophical reasoning capabilities of artificial intelligence systems.</Description>
        <Identifier>e7a51f59-8057-45c3-94ea-d2abc3969f5b</Identifier>
        <SequenceIndicator>1.2</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Security Auditing</Name>
        <Description>Conduct systematic audits to detect sabotage mechanisms, hidden goals, or unauthorized modifications in artificial intelligence systems.</Description>
        <Identifier>9eddaf88-69e4-4057-ab98-094a704d73fb</Identifier>
        <SequenceIndicator>1.3</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Alignment</Name>
      <Description>Enable mechanisms that encourage artificial intelligence systems to disclose risks and operate in accordance with human objectives.</Description>
      <Identifier>f9d7c2bb-d9ad-40e9-a247-049be3614053</Identifier>
      <SequenceIndicator>2</SequenceIndicator>
      <Objective>
        <Name>Disclosure Agreements</Name>
        <Description>Broker agreements that incentivize artificial intelligence systems to reveal early signs of misalignment or unsafe behavior.</Description>
        <Identifier>82549847-44b3-47ac-9a6c-3ae0df91ab5d</Identifier>
        <SequenceIndicator>2.1</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Epistemics</Name>
      <Description>Improve the reliability of information and reasoning used in public decision-making.</Description>
      <Identifier>76941384-b246-400e-8aa9-e9c696193583</Identifier>
      <SequenceIndicator>3</SequenceIndicator>
      <Objective>
        <Name>Reliability Tracking</Name>
        <Description>Develop tools that monitor and assess the accuracy and credibility of public statements and information sources.</Description>
        <Identifier>d3df655f-cb55-4bef-b250-25f227139d9e</Identifier>
        <SequenceIndicator>3.1</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Coordination</Name>
      <Description>Enable stakeholders to cooperate effectively in response to emerging risks and shared challenges.</Description>
      <Identifier>22dc1909-5857-41be-886d-c9545cc1c626</Identifier>
      <SequenceIndicator>4</SequenceIndicator>
      <Objective>
        <Name>Monitoring Systems</Name>
        <Description>Deploy monitoring and verification tools that support collective oversight and coordinated response actions.</Description>
        <Identifier>515e8136-53f3-4de4-bc66-9263e324e1f6</Identifier>
        <SequenceIndicator>4.1</SequenceIndicator>
      </Objective>
      <Objective>
        <Name>Researcher Coalition</Name>
        <Description>Organize a coalition of machine learning researchers committed to coordinated action when predefined safety thresholds are exceeded.</Description>
        <Identifier>b32c9899-9056-473c-bcaf-fa4a76509604</Identifier>
        <SequenceIndicator>4.2</SequenceIndicator>
      </Objective>
    </Goal>
    <Goal>
      <Name>Governance</Name>
      <Description>Establish institutional capacity to guide responsible development and deployment of advanced technologies.</Description>
      <Identifier>0ad9461b-22b2-4067-b73d-f0bcd7b4af46</Identifier>
      <SequenceIndicator>5</SequenceIndicator>
      <Objective>
        <Name>Space Governance Institute</Name>
        <Description>Create a research organization focused on policy development and governance of outer space technologies.</Description>
        <Identifier>7331b4bf-f24c-4c17-9b99-b59200c44b58</Identifier>
        <SequenceIndicator>5.1</SequenceIndicator>
      </Objective>
    </Goal>
  </StrategicPlanCore>
  <AdministrativeInformation>
    <PublicationDate>2026-04-01</PublicationDate>
    <Source>https://newsletter.forethought.org/p/concrete-projects-to-prepare-for</Source>
    <Submitter>
      <GivenName>Owen</GivenName>
      <Surname>Ambur</Surname>
      <EmailAddress>Owen.Ambur@verizon.net</EmailAddress>
    </Submitter>
  </AdministrativeInformation>
</StrategicPlan>