{ localUrl: '../page/1gg.html', arbitalUrl: 'https://arbital.com/p/1gg', rawJsonUrl: '../raw/1gg.json', likeableId: '424', likeableType: 'page', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], pageId: '1gg', edit: '1', editSummary: '', prevEdit: '0', currentEdit: '1', wasPublished: 'true', type: 'comment', title: '"(Understandable to focus on..."', clickbait: '', textLength: '1540', alias: '1gg', externalUrl: '', sortChildrenBy: 'recentFirst', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'PaulChristiano', editCreatedAt: '2015-12-29 20:24:33', pageCreatorId: 'PaulChristiano', pageCreatedAt: '2015-12-29 20:24:33', seeDomainId: '0', editDomainId: 'EliezerYudkowsky', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '0', isEditorComment: 'false', isApprovedComment: 'true', isResolved: 'false', snapshotText: '', anchorContext: '', anchorText: '', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '5194', text: '(Understandable to focus on explanation for now. Threaded replies to replies would also be great eventually.)\n\nEliezer: I assumed 95% efficiency was not sufficient; I was thinking about asymptotic equivalence, i.e. efficiency approaching 1 as the sophistication of the system increases. Asymptotic equivalence of technological capability seems less interesting than of cognitive capability, though they are equivalent either if we construe technology broadly to include cognitive tasks or if we measure technological capability in a way with lots of headroom.\n\n(Nick says "more or less any level of intelligence," which I guess could be taken to exclude the very highest levels of intelligence, but based on his other writing I think he intended merely to exclude low levels. The language in this post seems to explicitly cover arbitrarily high efficiency.) \n\nI still think that 99% confidence is way too high even if you allow 50% efficiency, though at that point I would at least go for "very likely."\n\nAlso, of course, you need to be able to replace "paperclip maximizer" with anything. When I imagine orthogonality failing, "human values" seem like a much more likely failure case than "paperclips."\n\nI don't think that this disagreement about orthogonality is especially important; I mostly found the 99%'s amusing and wanted to give you a hard time about it. 
It does suggest that in some sense I might be more pessimistic about the AI control problem itself than you are, with my optimism driven by faith in humanity / the AI community.', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '0', maintainerCount: '0', userSubscriberCount: '0', lastVisit: '2016-02-27 11:08:37', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'PaulChristiano' ], childIds: [], parentIds: [ '1fr', 'orthogonality' ], commentIds: [], questionIds: [], tagIds: [], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4658', pageId: '1gg', userId: 'PaulChristiano', edit: '1', type: 'newEdit', createdAt: '2015-12-29 20:24:33', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4655', pageId: '1gg', userId: 'PaulChristiano', edit: '0', type: 'newParent', createdAt: '2015-12-29 20:08:59', auxPageId: 'orthogonality', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4657', pageId: '1gg', userId: 'PaulChristiano', edit: '0', type: 'newParent', createdAt: '2015-12-29 20:08:59', auxPageId: '1fr', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'false', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }