# Valley of Dangerous Complacency

*When the AGI works often enough that you let down your guard, but it still has bugs. Imagine a robotic car that almost always steers perfectly, but sometimes heads off a cliff.*

The Valley of Dangerous Complacency is when a system works often enough that you let down your guard around it, but in fact the system is still dangerous enough that full vigilance is required.

- If a robotic car made the correct decision 99% of the time, you'd need to grab the steering wheel on a daily basis; you'd stay alert, and your robot-car-overriding skills would stay sharp.
- If a robotic car made the correct decision 100% of the time, you'd relax and let your guard down, but there wouldn't be anything wrong with that.
- If the robotic car made the correct decision 99.99% of the time, so that you'd need to grab the steering wheel (or else crash) on 1 day in 100, the task of monitoring the car would feel very unrewarding and the car would seem pretty safe. You'd let your guard down and your driving skills would get rusty. After a couple of months, the car would crash.

Compare the "[Uncanny Valley](https://en.wikipedia.org/wiki/Uncanny_valley)", where a machine system is partially humanlike - humanlike enough that humans try to hold it to a human standard - but not humanlike enough to actually seem satisfactory when held to that standard. This means that, as the system's degree of humanlikeness increases, there is a valley in which the user experience actually gets worse before it gets better.
Similarly, if users become complacent, a 99.99% reliable system can be worse than a 99% reliable one, even though, with *enough* reliability, the degree of safety starts climbing back out of the valley.
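To make the tradeoff concrete, here is a minimal back-of-the-envelope model in Python. The intervention rates are taken from the article's round numbers (99% reliable means roughly one takeover per day, 99.99% roughly one per 100 days); everything else - the function names and the vigilance parameters `fresh_miss`, `growth`, and `ceiling` - is a purely hypothetical assumption chosen to illustrate the shape of the valley, not a measurement of real drivers.

```python
"""Toy model of the Valley of Dangerous Complacency.

Every number below is an illustrative assumption chosen to match the
article's round figures; none of it is real driving data.
"""

def interventions_per_day(reliability: float) -> float:
    # Assumed mapping from "fraction of correct decisions" to how often
    # the human has to grab the wheel (0.99 -> once a day, 0.9999 -> once
    # per 100 days, as in the article).
    return (1.0 - reliability) * 100.0


def miss_probability(idle_days: float) -> float:
    # Assumed vigilance model: a freshly practiced operator misses ~0.1%
    # of needed takeovers; each idle day multiplies that miss rate by 1.3,
    # up to a ceiling of 50% for a thoroughly complacent operator.
    fresh_miss, growth, ceiling = 0.001, 1.3, 0.5
    idle_days = min(idle_days, 365.0)  # cap to avoid float overflow; already at ceiling
    return min(ceiling, fresh_miss * growth ** idle_days)


def expected_crashes_per_year(reliability: float) -> float:
    rate = interventions_per_day(reliability)   # takeovers needed per day
    idle_days = 1.0 / rate                      # average gap between takeovers
    return 365.0 * rate * miss_probability(idle_days)


if __name__ == "__main__":
    for r in (0.99, 0.999, 0.9999, 0.99999, 0.999999):
        print(f"reliability {r:.6f}: "
              f"{expected_crashes_per_year(r):6.3f} expected crashes per year")
```

Under these made-up numbers, the 99.99% car produces several times more expected crashes per year than the 99% car, and only at still higher reliability does the crash rate fall back below the 99% baseline - tracing out the valley described above.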