Thanks for the reply. I agree that strong Inevitability is unreasonable, and I understand the function of #1 and #2 in disrupting a prior frame of mind that assumes strong Inevitability, but that's not the only alternative to Orthogonality. I'm surprised that the arguments are considered successively stronger arguments in favor of Orthogonality, since #6 basically says "under reasonable hypotheses, Orthogonality may well be false." (I admit that's a skewed reading, but I don't know what the referenced ongoing work looks like, so I'm skipping that bit for now. [Edit: is this "tiling agents"? I'm not familiar with that work, but I can go learn about it.])

The other arguments are interesting commentary, but they don't argue that Orthogonality is true for agents we ought to care about.

* Gandhian stability argues that self-modifying agents will try to preserve their preference systems, but not that they can become arbitrarily powerful while doing so. As it happens, circular preference systems illustrate how Gandhian stability could limit how powerful a cognitive agent can become (see the sketch after this list).
* The unbounded agents argument says Orthogonality is true when "mind space" is broader than what we care about.
* The search tractability argument looks like a statement about the relative difficulty of *accomplishing* different goals, not the relative difficulty of holding those goals. I don't mean to dismiss the argument, but I don't understand it. I'm not even clear on exactly what the argument is saying about the tractability of searching for strategies for different goals. That it's the same for all possible goals?
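
To make the circular-preferences point concrete, here is a minimal, hypothetical money-pump sketch (the A ≻ B ≻ C ≻ A cycle, the flat trade fee, and the `money_pump` helper are all invented for illustration, not taken from the original discussion): an agent whose preferences form a cycle will pay for every locally preferred swap and can be drained of resources, and if Gandhian stability means it preserves that preference system under self-modification, it has no way to close the leak. That is one way a stably preserved preference system could cap how powerful an agent can become.

```python
# Hypothetical money-pump illustration: an agent with circular preferences
# A > B > C > A accepts every locally preferred trade, paying a small fee
# each time, and so cycles forever while steadily losing resources.

# Strict preference cycle: holding the key, the agent prefers the value.
PREFERS = {"A": "B", "B": "C", "C": "A"}
TRADE_FEE = 1  # resource cost the agent willingly pays per preferred swap


def money_pump(initial_resources: int, steps: int) -> int:
    """Simulate `steps` trade offers; return the resources left at the end."""
    holding = "A"
    resources = initial_resources
    for _ in range(steps):
        if resources < TRADE_FEE:
            break  # pumped dry: no resources left to trade with
        holding = PREFERS[holding]  # accept the locally preferred swap...
        resources -= TRADE_FEE      # ...and pay the fee for it
    return resources


if __name__ == "__main__":
    print(money_pump(initial_resources=10, steps=100))  # -> 0
```

Under these assumptions the agent ends up with nothing once enough trades are offered, however large its starting endowment; preserving the circular preference system is exactly what prevents it from ever stopping the loss.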