{"collectionById":{"06a985d6-1e7c-4ad2-97d7-4627d881e46a":{"id":"06a985d6-1e7c-4ad2-97d7-4627d881e46a","name":"Blog","fieldSchemas":[{"id":"49ac5d42-82a6-4b89-8724-e32091479a00","name":"Hero Image","type":"image"},{"id":"b3320911-05fb-4949-a507-bb42a2353f5a","name":"Slug","type":"slug","role":"slug"},{"id":"3125ba82-ef98-43db-b6aa-146996af08fc","name":"Rich text","type":"rich_text"},{"id":"7b2ee06c-17fe-4874-9b8d-05893a9af5d9","name":"Title","type":"plain_text","role":"primary"},{"id":"934efc19-f855-4839-ad0b-015f3277b8e7","name":"Date","type":"date"}],"itemById":{"069b1b96-791a-4881-9dbf-ed9fe20bd23b":{"id":"069b1b96-791a-4881-9dbf-ed9fe20bd23b","index":"!NNNNNO","collectionId":"06a985d6-1e7c-4ad2-97d7-4627d881e46a","fields":[{"id":"c8194fed-8828-4944-a610-4a2e690b1d73","value":"The AI Expertise Paradox: When AI Leaders Fail at AI Governance","itemId":"069b1b96-791a-4881-9dbf-ed9fe20bd23b","fieldSchemaId":"7b2ee06c-17fe-4874-9b8d-05893a9af5d9"},{"id":"be2e2e79-17fc-4fe7-a28c-ca9abac68ed0","value":"2026-04-22","itemId":"069b1b96-791a-4881-9dbf-ed9fe20bd23b","fieldSchemaId":"934efc19-f855-4839-ad0b-015f3277b8e7"},{"id":"d3754c08-1212-4074-a5c5-da0adf949f09","value":"{\"root\":{\"children\":[{\"children\":[{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Sullivan \u0026 Cromwell has apologized to a federal bankruptcy judge after a court filing was found to contain fabricated citations generated by AI\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"link\",\"version\":1,\"rel\":\"noreferrer\",\"target\":null,\"title\":null,\"url\":\"https://websitedc.s3.amazonaws.com/documents/In_re_Prince_USA_18_April_2026.pdf\"},{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\". The incident has been framed, predictably, as a cautionary tale about hallucinations.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"That framing is too small.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"This was not a model making things up, and it was not a lawyer failing to check their work. 
It was a governance failure inside a firm that advises clients on AI risk, publishes guidance on responsible deployment, and represents some of the most advanced AI companies in the world.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Which raises an uncomfortable question for every GRC function watching: if a firm this sophisticated cannot prevent the best-documented failure mode of generative AI, what does that say about the controls the rest of us rely on?\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":1,\"mode\":\"normal\",\"style\":\"\",\"text\":\"The real paradox is what expertise quietly replaces.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"The instinct is to call this an expertise paradox, where the organizations closest to AI are the most exposed to its risks. The sharper version is this: institutional confidence in AI literacy tends to substitute for operational control. When a firm believes its people understand the technology, that belief absorbs the friction (the cite checks, the grounding, the hard stops) that would otherwise prevent failure.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Hallucinations are not edge cases. They are a structural feature of generative AI, fluent and plausible and difficult to detect without deliberate verification. Hallucination rate is therefore not a technical metric. It is a governance metric. It defines how often a system produces confident, incorrect output, and how robust your controls need to be. If a hallucination reaches a court filing, the model did not fail. The system around the model did.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":1,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Policies existed. Training existed. Review existed. None of it worked.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"By the firm's own account, the governance elements were in place. 
Policies on AI use. Mandatory training. A requirement that AI output be independently verified. The firm acknowledged that these were not followed, and that its review process did not catch the errors before filing.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"This exposes the most common flaw in AI governance today: the assumption that policies and awareness are sufficient. They are not. Governance that depends on human compliance, in environments where outputs are fast, fluent, and high volume, will eventually break down. Not because people are careless, but because the system is designed in a way that makes failure likely.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"There were no embedded controls in the workflow. No hard stops preventing unverified content from being filed. No citation validation, no grounding against authoritative sources, no rule traceability linking each output back to the policy that governed its creation. The system depended on human review to catch machine-generated errors.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"That is not governance. That is hope.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":1,\"mode\":\"normal\",\"style\":\"\",\"text\":\"AI is not a junior associate.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"The supervision model failed in a related way. AI output was treated, implicitly, like the work of a junior lawyer, something that could be reviewed and trusted within a traditional hierarchy.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"But AI is not a junior associate. It has no understanding of truth, no awareness of error, no accountability. What it does have is the ability to produce language that looks correct. That is the dangerous property. Not that hallucinations occur, but that they are convincing. They sound authoritative. They follow expected patterns. 
They mirror expertise.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"type\":\"linebreak\",\"version\":1},{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Review gets lighter. Assumptions replace verification. Confidence replaces scrutiny. AI does not just introduce new risks. It reshapes existing controls in ways that make those risks harder to detect.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":1,\"mode\":\"normal\",\"style\":\"\",\"text\":\"The firm can describe its policy. It cannot yet describe its pipeline.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Perhaps the most telling detail is what is still missing from the public record: which AI system was used, how it was accessed, and where its output flowed before reaching the filing. The firm has disclosed the governance shape (the training, the verification requirement, the policies) but not the operational pipeline that produced the error.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"In any other high-stakes outsourced process, this would be unthinkable. Organizations are expected to know exactly which vendor produced which output, under what conditions, and with what controls. Without that visibility, governance becomes abstract, accountability diffuses, and failures become harder to contain.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"This is the gap that rule traceability is meant to close. Every AI-assisted output should be linkable to the specific policy it was produced under, the safeguards applied to it, and the verification step it passed through. 
Not as a retrospective audit exercise, but as a live property of the workflow itself.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":1,\"mode\":\"normal\",\"style\":\"\",\"text\":\"A profession problem, and increasingly an enterprise one.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":1,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"It is tempting to read this as an isolated lapse. It is not. Public databases tracking AI hallucinations in legal filings now contain more than a thousand examples. The pattern repeats across solo practitioners, mid-tier firms, and elite practices alike. What makes the Sullivan \u0026 Cromwell incident useful is that it strips away the excuses. This was not a firm lacking sophistication, training, or policy. It had all three. The controls still failed.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"The uncomfortable truth is that even the best firms using the best systems remain exposed to this failure mode, because the exposure is structural. Policies sit above the workflow. Training sits above the workflow. What is missing is a control layer inside the workflow, where rule traceability, embedded safeguards, and verifiable supervision operate as properties of the pipeline rather than expectations placed on the user.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"That is the problem we at Surveill are obsessed with solving: building the supervisory layer that makes AI use auditable in real time, where every output carries its rule lineage, every safeguard is enforced at the point of action, and supervision remains intact from generation to filing. So that when regulators, auditors, or examiners ask what controls governed a given output, the answer is not a policy document. It is a verifiable chain of evidence.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"},{\"children\":[{\"detail\":0,\"format\":0,\"mode\":\"normal\",\"style\":\"\",\"text\":\"Because in the end, AI risk is not about what the model does. 
It is about what the organization allows to pass through.\",\"type\":\"text\",\"version\":1}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"paragraph\",\"version\":1,\"textFormat\":0,\"textStyle\":\"\"}],\"direction\":\"ltr\",\"format\":\"\",\"indent\":0,\"type\":\"root\",\"version\":1}}","itemId":"069b1b96-791a-4881-9dbf-ed9fe20bd23b","fieldSchemaId":"3125ba82-ef98-43db-b6aa-146996af08fc"},{"id":"4c5e28c5-d694-4cb7-ac0e-93f07404b145","value":"{\"image\":\"5b11c5f4a3fddaef2d0eced809998a62387a71cf\",\"imageThumbnail\":\"8695e2af3346154fec0e41a19ab1bce779450a13\",\"originalImageHeight\":962,\"originalImageWidth\":2424,\"altText\":\"\",\"fileName\":\"image 1 (2).png\"}","itemId":"069b1b96-791a-4881-9dbf-ed9fe20bd23b","fieldSchemaId":"49ac5d42-82a6-4b89-8724-e32091479a00"}]}}}},"slugByItemId":{"2a546bf0-17a5-4755-b0ec-d1f0cd8b3d71":"everyones-talking-about-ai-but-regulators-want-something-else","069b1b96-791a-4881-9dbf-ed9fe20bd23b":"the-ai-expertise-paradox-when-ai-leaders-fail-at-ai-governance","730c4107-95e1-4ec8-91df-1cb4f8d4e5cb":"supervision-isnt-a-checkbox-its-a-system-built-on-consistency","664c0c4c-5cad-4815-9df3-fbe1746ce34c":"what-defines-quality-in-compliance-marketing-reviews-for-financial-firms","7ea8cdd7-ab40-494d-ab40-d9711fe034a6":"cysecs-planned-cfd-broker-raids-reflect-a-deeper-industry-problem","8cec3a3f-d309-4597-b02d-307b4d4789a8":"is-100-risk-visibility-an-enigma-not-anymore","ef380578-665e-43b9-9abc-53eb2b5096aa":"turning-a-conundrum-into-a-strategic-advantage","36fe4ee8-1342-4406-beea-46ca752681f6":"how-surveill-elevates-compliance-marketing-reviews-for-financial-firms","5eb57d9a-5614-42f1-bc3e-a6e90fe02dc3":"claude-legal-coworker-big-signal-bigger-questions","5cebe5db-c2d8-4a3f-b2e9-0e5f7b648d67":"next-version-of-surveill-launched","b29ceeed-8fc7-46ab-af31-3f7f150f541a":"why-prompt-engineering-doesnt-work-in-legal-regtech-and-what-surveill-does-instead","498bd968-97c5-4bee-afb6-666d5f6167d8":"prop-trading-is-a-fugazi"}}