{ localUrl: '../page/81.html', arbitalUrl: 'https://arbital.com/p/81', rawJsonUrl: '../raw/81.json', likeableId: '2420', likeableType: 'page', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], pageId: '81', edit: '2', editSummary: '', prevEdit: '1', currentEdit: '2', wasPublished: 'true', type: 'comment', title: '"Can we properly classify th..."', clickbait: '', textLength: '1047', alias: '81', externalUrl: '', sortChildrenBy: 'recentFirst', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'EliezerYudkowsky', editCreatedAt: '2015-12-30 00:15:25', pageCreatorId: 'EliezerYudkowsky', pageCreatedAt: '2015-06-19 19:13:24', seeDomainId: '0', editDomainId: 'EliezerYudkowsky', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '0', isEditorComment: 'false', isApprovedComment: 'true', isResolved: 'false', snapshotText: '', anchorContext: '', anchorText: '', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '549', text: 'Can we properly classify this as an error? If there's an AI that will be hacked, or maybe hack itself, only if it *correctly* forecasts that distant superintelligences are creating millions more simulations than the actual AI, then I'd expect distant superintelligences to create millions of simulations. Simulating a pre-intelligence-explosion AI is extremely cheap. Sure, *not* doing it is even *cheaper*, but if the AI has a sufficiently good model of the distant SI to not be fooled by fakeouts in one decision that get corrected by another decision, then the distant SI will expend the resources to actually simulate.\n\nIt seems to me that we'd have to address this issue in a way that's robust to the case where the distant SI is actually simulating a million copies of our local AI that our local AI can't distinguish from itself. If we only correct erroneous beliefs about such simulation by processes that only work to eject false beliefs, then perhaps the distant SI can hack us by making the local AI's belief not be erroneous.', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '0', maintainerCount: '0', userSubscriberCount: '0', lastVisit: '2016-02-24 00:35:07', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'EliezerYudkowsky' ], childIds: [], parentIds: [ 'probable_environment_hacking', '79' ], commentIds: [], questionIds: [], tagIds: [], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4740', pageId: '81', userId: 'EliezerYudkowsky', edit: '2', type: 'newEdit', createdAt: '2015-12-30 00:15:25', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '17', pageId: '81', userId: 'AlexeiAndreev', edit: '1', type: 'newParent', createdAt: '2015-10-28 03:46:51', auxPageId: '79', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '430', pageId: '81', userId: 'AlexeiAndreev', edit: '1', type: 'newParent', createdAt: '2015-10-28 03:46:51', auxPageId: 'probable_environment_hacking', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '1269', pageId: '81', userId: 'EliezerYudkowsky', edit: '1', type: 'newEdit', createdAt: '2015-06-19 19:13:24', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'false', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }