{ localUrl: '../page/programmer_deception.html', arbitalUrl: 'https://arbital.com/p/programmer_deception', rawJsonUrl: '../raw/10f.json', likeableId: '16', likeableType: 'page', myLikeValue: '0', likeCount: '2', dislikeCount: '0', likeScore: '2', individualLikes: [ 'EliezerYudkowsky', 'NathanFish' ], pageId: 'programmer_deception', edit: '3', editSummary: '', prevEdit: '2', currentEdit: '3', wasPublished: 'true', type: 'wiki', title: 'Programmer deception', clickbait: '', textLength: '2621', alias: 'programmer_deception', externalUrl: '', sortChildrenBy: 'likes', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'AlexeiAndreev', editCreatedAt: '2015-12-16 04:53:36', pageCreatorId: 'EliezerYudkowsky', pageCreatedAt: '2015-07-16 01:01:53', seeDomainId: '0', editDomainId: 'EliezerYudkowsky', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '2', isEditorComment: 'false', isApprovedComment: 'true', isResolved: 'false', snapshotText: '', anchorContext: '', anchorText: '', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '211', text: '[9r Programmer] deception is when the AI's decision process leads it to optimize for an instrumental goal of causing the programmers to have false beliefs. For example, if the programmers [6h intended] to create a [10d happiness maximizer] but actually created a pleasure maximizer, then the pleasure maximizer will estimate that there would be more pleasure later if the programmers go on falsely believing that they've created a happiness maximizer (and hence don't edit the AI's current utility function). Averting such incentives to deceive programmers is one of the major subproblems of [45 corrigibility].\n\nThe possibility of programmer deception is a central difficulty of [2l advanced safety] - it means that, unless the rest of the AI is working as intended and whatever programmer-deception-defeaters were built are functioning as planned, we can't rely on observations of nice current behavior to indicate future behavior. That is, if something went wrong with your attempts to build a nice AI, you could currently be observing a non-nice AI that is *smart* and trying to *fool you*. Arguably, some methodologies that have been proposed for building advanced AI are not robust to this possibility.\n\n[todo: clean this up and expand]\n\n- [ instrumental pressure] exists every time the AI's best strategic path doesn't have a global optimum that coincides with the programmers believing true things.\n - consider the highest utility obtainable if the programmers believe true beliefs B, and call this outcome O and the true beliefs B. if there's a higher-utility outcome O' which can be obtained when the programmers believe B' with B'!=B, we have an instrumental pressure to deceive the programmers.\n- happens when you combine the advanced agent properties of consequentialism with programmer modeling\n- this is an instrumental convergence problem, which means it involves an undesired instrumental goal, which means that we'll get Nearest Neighbor on attempts to define utility penalties for the programmers believing false things or otherwise exclude this as a special case\n - if we try to define a utility bonus for programmers believing true things, then of course ceteris paribus we tile the universe with tiny 'programmers' believing lots and lots of even numbers are even, and getting to this point temporarily involves deceiving a few programmers now\n- relation to the problem of programmer manipulation\n- central example of how divergences between intended goals and AI goals can blow up into astronomical failure\n- central driver of Treacherous Turn which in turn contributes to [6q Context Change]', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '1', maintainerCount: '1', userSubscriberCount: '0', lastVisit: '2016-02-06 06:43:23', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'EliezerYudkowsky', 'AlexeiAndreev' ], childIds: [ 'cognitive_steganography' ], parentIds: [ 'corrigibility' ], commentIds: [], questionIds: [], tagIds: [ 'work_in_progress_meta_tag' ], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '9459', pageId: 'programmer_deception', userId: 'EliezerYudkowsky', edit: '3', type: 'newChild', createdAt: '2016-04-28 23:15:34', auxPageId: 'cognitive_steganography', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '3861', pageId: 'programmer_deception', userId: 'AlexeiAndreev', edit: '3', type: 'newEdit', createdAt: '2015-12-16 04:53:36', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '3860', pageId: 'programmer_deception', userId: 'AlexeiAndreev', edit: '0', type: 'newAlias', createdAt: '2015-12-16 04:53:35', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '1120', pageId: 'programmer_deception', userId: 'AlexeiAndreev', edit: '1', type: 'newUsedAsTag', createdAt: '2015-10-28 03:47:09', auxPageId: 'work_in_progress_meta_tag', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '326', pageId: 'programmer_deception', userId: 'AlexeiAndreev', edit: '1', type: 'newParent', createdAt: '2015-10-28 03:46:51', auxPageId: 'corrigibility', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '1539', pageId: 'programmer_deception', userId: 'EliezerYudkowsky', edit: '2', type: 'newEdit', createdAt: '2015-07-16 01:08:24', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '1538', pageId: 'programmer_deception', userId: 'EliezerYudkowsky', edit: '1', type: 'newEdit', createdAt: '2015-07-16 01:01:53', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'true', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }