<?xml version="1.0" encoding="UTF-8"?>
<!--Ingeniux Dynamic Site Server. Version:10.3.176--><InsetPage ID="x4764" Name="Policy, Guidance, and Risk Reduction" IsComponent="false" Changed="20250731T02:31:02" Created="20250521T18:27:27" ViewName="InsetPage" Published="20250730T22:31:12" SURL="/expertise/caia/caia-areas/policy-guidance-and-risk-reduction" SiteBaseUrl="" Locale="" XPowerPath="/cna.org/Home/Expertise/CAIA/CAIA Areas/Policy, Guidance, and Risk Reduction">
  <IGX_Categories Count="0" CategoryIds="" />
  <LingualMaps />
  <XHTML ID="" Name="RowComponent" IsComponent="true" Changed="20250717T13:58:35" Created="20250521T18:27:27" SURL="/expertise/caia/caia-areas/policy-guidance-and-risk-reduction" SiteBaseUrl="" Locale="" XPowerPath="/cna.org/Home/Expertise/CAIA/CAIA Areas/Policy, Guidance, and Risk Reduction" COMPUID="590d82015c77d5f63a2239dfb182685a" Embedded="true" UID="590d82015c77d5f63a2239dfb182685a"><IGX_Categories Count="0" CategoryIds="" /><LingualMaps />
        <XHTML type="xhtml" UID="3dffc4f778854a4998cd412741531d21" label="XHTML" readonly="false" hidden="false" required="true" indexable="true" Height="" CIID="">&lt;p&gt;&lt;strong&gt;Policy, Guidance, and Risk Reduction.&lt;/strong&gt; CNA helps clients develop policies that foster innovation while safeguarding public trust and upholding the highest standards of accountability. We empower government agencies to navigate the complexities of AI with tailored policy solutions that ensure ethical, transparent, secure, and effective AI deployment. At CNA, we understand that legal and ethical oversight are an integral part of delivering capabilities that create value for our clients.&lt;/p&gt;</XHTML>
        <BackgroundColor type="enumeration" UID="333c2c60140a419481e8f82c2a694f1d" label="Background Color" readonly="false" hidden="false" required="false" indexable="false" values="white gray orange blue" CIID="">blue</BackgroundColor><Padding type="enumeration" UID="0fa3feada24542e58a8936e712cb4ea1" label="Padding" readonly="false" hidden="false" required="false" indexable="false" values="default bottom-only top-only none" CIID="">none</Padding><ClassNames type="string" UID="865762e6b0f64ae8bdab714bd0572adf" label="Class Names" readonly="false" hidden="false" required="false" indexable="false" CIID=""></ClassNames>
      </XHTML><LinkLists ID="" Name="RowComponent" IsComponent="true" Changed="20250717T13:58:35" Created="20250521T18:27:27" SURL="/expertise/caia/caia-areas/policy-guidance-and-risk-reduction" SiteBaseUrl="" Locale="" XPowerPath="/cna.org/Home/Expertise/CAIA/CAIA Areas/Policy, Guidance, and Risk Reduction" COMPUID="8f95eff002ae4e5499d4cfdf40bd1802" Embedded="true" UID="8f95eff002ae4e5499d4cfdf40bd1802"><IGX_Categories Count="0" CategoryIds="" /><LingualMaps />
        <DisplayType type="enumeration" UID="f332c381642948a389c8a405a8c26306" label="Display Type" readonly="false" hidden="false" required="true" indexable="false" values="ArticleList Blurbs Buttons Callouts CategoryLists FeaturedSquares FeaturedSquares-NoDescription FeaturedSquares-NoImages FullscreenList Slideshow FeaturedSlideshow" CIID="">Blurbs</DisplayType>
        <TaxonomyNavigation StartNodes="" Depth="-1" IncludePages="true" Name="Tags" label="Tags" UID="7afa30c90a02440780691b2236a0f302" />
        <LinkSet Title="" LinkIDS="x4011|x2581|x768|x1058|x1282|x1288|x1295|x1003|x1298|x4699" Name="Links" label="Links" UID="ce7962a65e7a42e9889d201ef64d398d"><Page ID="x4011" URL="our-media/indepth/2024/05/a-process-for-using-llms-in-a-national-security-research-organization" Schema="Article" Locale="" Changed="20250731T02:51:43" CategoryIds="500|161|386" Name="A Process for Using LLMs in a National Security Research Organization" Description="We collaborated across disciplines to develop a process to safely introduce large language models without compromising quality or national security." PageTitle="A Process for Using LLMs in a National Security Research Organization" PageImage="/images/About-Us/CNA-HQ-front-cropped.jpg" PageImageAlt="Entrance to CNA headquarters" AuthorName="Rizwan Jan and Kim Deal " PublishedDate="20240522T19:51:00" IsComponent="false" /><Page ID="x2581" URL="quick-looks/2023/ai-and-nuclear-operations-identifying-and-mitigating-risks" Schema="Quick-Look" Locale="" Changed="20250805T16:07:39" CategoryIds="36" Name="AI AND NUCLEAR OPERATIONS: IDENTIFYING AND MITIGATING RISKS" Description="Summarizes the steps that can help guide nuclear powers, and militaries overall, to have AI applications reduce risks associated with nuclear operations" PageTitle="Identifying and Mitigating Risks" PageImage="/images/quick-look.webp" PreTitle="AI and Nuclear Operations" PageImageAlt="first page of text" CoverImage="/quick-looks/2023/AI-NUCLEAR-OPERATIONS-IDENTIFYING-MITIGATING%20RISKS_Page_1.png" HideFromNavigation="true" PublicationFile="/quick-looks/2023/AI-NUCLEAR-OPERATIONS-IDENTIFYING-MITIGATING%20RISKS.pdf" PublishedDate="20230209T15:07:00" IsComponent="false" /><Page ID="x768" URL="analyses/2021/10/addressing-ai-challenges-in-human-machine-teaming" Schema="Report" Locale="" Changed="20250527T18:19:09" CategoryIds="36|28|516|30" Name="Addressing AI Challenges in Human Machine Teaming" Description="Our review of DON strategy documents indicates that 
DON is paying insufficient attention to the full range of DOTMLPF-P (doctrine, organization, training, materiel, leadership &amp; education, personnel, facilities, and policy) implications that these technologies will have. In particular, more attention needs to be paid to the Manpower, Personnel, Training, and Education (MPT&amp;E) to enable human-machine teaming." PageImage="/reports/2021/10/Underwater-Machine.jpg" PageImageAlt="man testing underwater robot " CoverImage="/reports/2021/10/addressing-AIs-Operational-Challenges-Putting-the-Human-in-Human-Machine-Teaming.PNG" CoverImageAlt="first page of text" PublicationFile="/reports/2021/10/Addressing-AIs-Operational-Challenges-Putting-the-Human-in-Human-Machine-Teaming.pdf" Title="Addressing AI's Operational Challenges: Putting the Human in Human-Machine Teaming" PublishedDate="20211029T04:00:00" ShortTitle="Addressing AI's Operational Challenges" IsComponent="false" /><Page ID="x1058" URL="analyses/2019/10/ai-safety-navy-action-plan" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="518|36|16" Name="AI Safety Navy Action Plan" Description="In light of the Navy’s stated commitment to using AI, and given the strategic importance of AI safety, we provide the Navy with a first step towards a comprehensive approach to safety. We use a risk management approach to frame our treatment of AI safety risks: identifying risks, analyzing them, and suggesting concrete actions for the Navy to begin addressing them. The first type of safety risk, being technical in nature, will require a collaborative effort with industry and academia to address. The second type of risk, associated with specific military missions, can be addressed in a combination of military experimentation, research, and concept development to find ways to promote effectiveness along with safety. For each type of risk, we use examples to show concrete ways of managing and reducing the risk of AI applications. 
We then discuss institutional changes that would help promote safety in the Navy’s AI efforts." PageImage="/reports/2019/10/keep%20calm.png" PageImageAlt="Keep Calm and Work on AI Safety" CoverImage="/reports/2019/10/DOP-2019-U-021957-1Rev.png" CoverImageAlt="Cover" PublicationFile="/reports/2019/10/DOP-2019-U-021957-1Rev.pdf" Title="AI Safety: An Action Plan for the Navy" PublishedDate="20191022T17:27:00" IsComponent="false" /><Page ID="x1282" URL="analyses/2018/08/ai-and-autonomy-understanding-and-mitigating-risks" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="519|36|16|6" Name="AI and Autonomy Understanding and Mitigating Risks" Description="Given the rapid and significant advances in AI, the strong interest in leveraging this technology from advanced militaries, and the urgent concerns voiced in the media, we examined commonly held concerns of AI and autonomy in war. We found that these concerns, on further examination, were not quite what they seemed on first blush. Some concerns were inconsistent with the current state of the technology, such as assuming that general AI is feasible when most estimates place this development many decades away (if ever). Others do not adequately consider the way military systems are actually structured and conducted, in which AI-enabled and autonomous systems would operate as part of a larger process for delivering the use of force. This larger context helps address concerns about accountability and discrimination. Note that we did not argue that these concerns are spurious—they have value because they can lead to much needed debates and discussions regarding ethical issues of this emerging technology. However, we emphasized that the real risk in a military context (expressed in operational outcomes such as civilian casualties and fratricide) is low from these commonly held concerns. 
This is important from a risk management perspective because a mismatch between efforts to mitigate risk and the actual sources of risk could lead to the pursuit of ineffective solutions." PageImage="/reports/2018/08/AI%20soldier.jpg" PageImageAlt="a soldier in front of the word &quot;AI&quot;" CoverImage="/reports/2018/08/DOP-2018-U-018296-Final_Page_01.jpg" CoverImageAlt="Cover" PublicationFile="/reports/2018/08/DOP-2018-U-018296-Final.pdf" Title="AI and Autonomy in War: Understanding and Mitigating Risks" PublishedDate="20180828T15:31:00" IsComponent="false" /><Page ID="x1288" URL="analyses/2017/ai-robots-and-swarms" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="9|520|36|29" Name="AI Robots and Swarms" Description="The military is on the cusp of a major technological revolution, in which warfare is conducted by unmanned and increasingly autonomous weapon systems. However, unlike the last “sea change,” during the Cold War, when advanced technologies were developed primarily by the Department of Defense (DoD), the key technology enablers today are being developed mostly in the commercial world. This study looks at the state-of-the-art of AI, machine-learning, and robot technologies, and their potential future military implications for autonomous (and semi-autonomous) weapon systems. While no one can predict how AI will evolve or predict its impact on the development of military autonomous systems, it is possible to anticipate many of the conceptual, technical, and operational challenges that DoD will face as it increasingly turns to AI-based technologies. This study examines key issues, identifies analysis gaps, and provides a roadmap of opportunities and challenges. It concludes with a list of recommended future studies." 
PageImage="/images/GenericReportImage.jpg" PageImageAlt="Generic Report Cover" CoverImage="/reports/2017/DRM-2017-U-014796-Final_Page_001.jpg" CoverImageAlt="Cover" PublicationFile="/reports/2017/DRM-2017-U-014796-Final.pdf" Title="AI, Robots, and Swarms: Issues, Questions, and Recommended Studies" PublishedDate="20170101T16:31:00" IsComponent="false" /><Page ID="x1295" URL="analyses/2017/insights-for-the-third-offset" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="9|520|36|29" Name="Insights for the Third Offset" Description="CNA conducts analysis for the U.S. Navy, the Department of Defense (DOD), and other sponsors, ranging across policy, strategy, organizational processes, technical performance of military systems, and current operations. Because of the expected impact of autonomy and artificial intelligence (AI) to the character of warfare, CNA has created a Center for Autonomy and Artificial Intelligence to focus on these emerging technologies and their significant role in U.S. defense policy and all the military services. The Center combines CNA’s strengths and experience in conducting objective analysis of U.S. military operations with focused expertise in autonomy and other aspects of AI. This report, the first created by the new Center, takes lessons and insights from CNA’s body of work for the Navy and the joint force, including CNA’s field program of embedded analysts in military commands around the world. Though much of the emerging technology examined in this report is new, the approach of applying lessons from U.S. operations and institutional processes to key challenges in leveraging autonomy and AI continues CNA’s applied research paradigm of exploring many opportunities to resolve or work around challenges that have been seen before. 
The aim of this report is to anticipate challenges of “Third Offset” implementation based on past lessons, and then provide concrete recommendations for promoting the effective incorporation of autonomy, AI, and related technologies in U.S. military operations. This report discusses making autonomy and AI militarily effective from an acquisition and technology perspective, and how to pursue these capabilities in ways that are consistent with long-standing U.S. values and that promote broader U.S. national interests." PageImage="/reports/2017/07/green%20brain.jpg" PageImageAlt="A green brain" CoverImage="/reports/2017/07/DRM-2017-U-016281-Final_Page_01.jpg" CoverImageAlt="Cover" PublicationFile="/reports/2017/07/DRM-2017-U-016281-Final.pdf" Title="Insights for the Third Offset: Addressing Challenges of Autonomy and Artificial Intelligence in Military Operations " PublishedDate="20170901T13:41:00" IsComponent="false" /><Page ID="x1003" URL="analyses/2020/09/social-media-bots-laws" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="517|36|8|24" Name="Social Media Bots: Laws" Description="While social media bots have the ability to greatly affect US national security and public discourse, the current landscape of US federal and state laws regulating such bots is limited. This study explores the challenges inherent to passing social media bot-related legislation and details current efforts to do so, including at the federal and state levels. It briefly explores the context in the European Union as well. This paper then discusses the dilemmas social media companies face as they think about effective bot policies and identifies the four main categories of policies through which the social media platforms regulate the use of bots on their sites. 
As they face evolving threats from bots, the social media companies will continue to adapt their policies accordingly, though it remains an open question whether and to what extent these companies should regulate themselves in the face of additional pressure from Congress and the public. " PageImage="/reports/2020/10/jack%20dorsey.png" PageImageAlt="Social media executives are sworn in to testify before U.S. Senate Intelligence Committee on Capitol Hill in Washington" CoverImage="/reports/2020/10/DIM-2020-U-028193-Final.png" CoverImageAlt="Cover" PublicationFile="/reports/2020/10/DIM-2020-U-028193-Final.pdf" Title="Social Media Bots: Laws, Regulations, and Platform Policies" PublishedDate="20200918T12:54:00" IsComponent="false" /><Page ID="x1298" URL="analyses/2017/ai-and-autonomy-opportunities-and-challenges" Schema="Report" Locale="" Changed="20250527T18:18:43" CategoryIds="9|520|36|29" Name="AI and Autonomy Opportunities and Challenges" Description="The military is on the cusp of a major technological revolution, in which warfare is conducted by unmanned and increasingly autonomous weapon systems. This exploratory study considers the state-of-the-art of artificial intelligence (AI), machine- learning, and robot technologies, and their potential future military implications for autonomous (and semi-autonomous) weapon systems. Although no one can predict how AI will evolve or how it will affect the development of military autonomous systems, we can anticipate many of the conceptual, technical, and operational challenges that DOD will face as it increasingly turns to AI-based technologies. 
We identified four key gaps facing DOD as the military evolves toward an “autonomy era”: (1) a mismatch of timescales between the pace of commercial innovation and DOD’s acquisition process; (2) an underappreciation of the fundamental unpredictability of autonomous systems; (3) a lack of a universally agreed upon conceptual framework for autonomy; and (4) a disconnect between the design of autonomous systems and CONOPS development. We examine these gaps, provide a roadmap of opportunities and challenges, and identify areas of future studies." PageImage="/reports/2017/thinker%202.jpg" PageImageAlt="The Thinker made out of geometric webs" CoverImage="/reports/2017/DIS-2017-U-016388-Final_Page_01.jpg" CoverImageAlt="Cover" PublicationFile="/reports/2017/DIS-2017-U-016388-Final.pdf" Title="Artificial Intelligence &amp; Autonomy: Opportunities and Challenges" PublishedDate="20171001T13:41:00" IsComponent="false" /><Page ID="x4699" URL="analyses/2025/05/artificial-intelligence-maturity-model" Schema="Report" Locale="" Changed="20251118T21:45:16" CategoryIds="642|514" Name="Artificial Intelligence Maturity Model" Description="The CNA AI Maturity Model enables government agencies to  understand and communicate the current maturity level of their AI programs, the  level of maturity they are striving to achieve in a given period of time, and the path to get there." PageImage="/images/GenericReportImage.jpg" CoverImage="/reports/2025/05/AI-Maturity-Model-Government-Agencies-Cover.webp" HideFromNavigation="false" PublicationFile="/reports/2025/05/AI-Maturity-Model-Government-Agencies.pdf" Title="CNA’s Artificial Intelligence (AI) Maturity Model for Government Agencies" PublishedDate="20250501T15:58:00" ShortTitle="AI Maturity Model for Government Agencies" Thumbnail="/images/expertise/CAIA/AI_Maturity_Model_Image.webp" IsComponent="false" /></LinkSet>
        
        
        <NonLinkDisclaimer type="string" UID="6348d4f62cef4315bc4fc62f14348c7d" label="Non-Link Disclaimer" readonly="false" hidden="false" required="false" indexable="false" CIID=""></NonLinkDisclaimer>
        <BackgroundColor type="enumeration" UID="2138486f7e814218a030db71dd77efe4" label="Background Color" readonly="false" hidden="false" required="false" indexable="false" values="white gray orange blue" CIID="">blue</BackgroundColor><ButtonColor type="enumeration" UID="690c4318a7454fd08cd583de6b6f43e2" label="Button Color" readonly="false" hidden="false" required="false" indexable="false" values="white gray orange blue" CIID=""></ButtonColor><ClassNames type="string" UID="95df97089fb14f33923eb6deafdb2cbb" label="Class Names" readonly="false" hidden="false" required="false" indexable="false" CIID=""></ClassNames><MaxRecords type="string" UID="aba07669c2d0431bbbb972f104a81b6d" label="Max Number of Records to Display" readonly="false" hidden="false" required="false" indexable="false" CIID=""></MaxRecords><SlidesToDisplay type="string" UID="5da4a2187aaa4b29816629b255e132c9" label="Slides to Display (Slideshow only)" readonly="false" hidden="false" required="false" indexable="false" CIID="">3</SlidesToDisplay><PrioritizeThumbnails type="boolean" UID="a8e18c99190c47e1935b5b04e88a8a38" label="Use Thumbnail Images (Slideshow only)" readonly="false" hidden="false" required="false" indexable="false" CIID=""></PrioritizeThumbnails>
        <ExploreMoreLink Type="Group" label="Explore More Link"><ShowExploreMoreLink type="boolean" UID="ae1a9bd925a541b596f7d757ec0b2b5f" label="Show Explore More Link" readonly="false" hidden="false" required="false" indexable="false" CIID="">false</ShowExploreMoreLink><TaxonomyNavigation StartNodes="" Depth="-1" IncludePages="false" Name="Expertise" label="Expertise" UID="2e3a5d56dc574d38ba9cf75d23eb43d0" /></ExploreMoreLink>
        <ShowItemExploreLinks type="boolean" UID="400bc51c88c44505be6601305649043f" label="ShowItemExploreLinks" readonly="false" hidden="false" required="false" indexable="false" CIID="">false</ShowItemExploreLinks><TaxonomyNavigation StartNodes="" Depth="-1" IncludePages="false" Name="AllowedCategories" label="Allowed Categories" UID="0555fdfb1d4b484d8e736453577184d3" />
      </LinkLists>
<IGX_Info><GET /><POST /><COOKIES><IGXSessionState>ujh4sgggicreltawi1rbcpgh</IGXSessionState></COOKIES><XPRESS /><SERVER_VARIABLES /><REQUEST_INFO><URL>https://www.cna.org/x4764.xml</URL><ORIGINAL_URL>https://www.cna.org/expertise/caia/caia-areas/policy-guidance-and-risk-reduction</ORIGINAL_URL><USER_AGENT>Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)</USER_AGENT></REQUEST_INFO></IGX_Info></InsetPage>