{ localUrl: '../page/817.html', arbitalUrl: 'https://arbital.com/p/817', rawJsonUrl: '../raw/817.json', likeableId: '0', likeableType: 'page', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], pageId: '817', edit: '1', editSummary: '', prevEdit: '0', currentEdit: '1', wasPublished: 'true', type: 'comment', title: '"This isn't quite right as an exposition of Lewi..."', clickbait: '', textLength: '1284', alias: '817', externalUrl: '', sortChildrenBy: 'recentFirst', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'BenPlommer', editCreatedAt: '2017-03-07 21:48:47', pageCreatorId: 'BenPlommer', pageCreatedAt: '2017-03-07 21:48:47', seeDomainId: '0', editDomainId: '2411', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '0', isEditorComment: 'false', isApprovedComment: 'false', isResolved: 'false', snapshotText: '', anchorContext: 'On the majority view within contemporary decision theory, this is the reply to the "If you're so rational, why aincha rich?" argument in favor of one\\-boxing on Newcomb's Problem\\. Somebody who actually takes only Box B is merely 'managing the news' about Box B, not actually acting to maximize the causal impacts of their actions\\. Omega choosing to reward people who only take Box B is akin to happening to already have toxoplasmosis at the start of the decision problem, or Omega deciding to reward only evidential decision theorists\\. Evidential agents only seem to win in 'Why aincha rich?' scenarios because they're managing the news in a way that an artificial problem setup declares to be news about wealth\\.', anchorText: 'On the majority view within contemporary decision theory, this is the reply to the "If you're so rational, why aincha rich?" argument in favor of one\\-boxing on Newcomb's Problem\\. Somebody who actually takes only Box B is merely 'managing the news' about Box B, not actually acting to maximize the causal impacts of their actions\\. Omega choosing to reward people who only take Box B is akin to happening to already have toxoplasmosis at the start of the decision problem, or Omega deciding to reward only evidential decision theorists\\. Evidential agents only seem to win in 'Why aincha rich?' scenarios because they're managing the news in a way that an artificial problem setup declares to be news about wealth\\.', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '360', text: 'This isn't quite right as an exposition of Lewis's argument – it elides the distinction between the irrationality of "managing the news" and the way that (according to Lewis) the scenario pre-rewards an irrational choice. Evidential agents don't just "seem" to win – they *really do* win, because the scenario is set up to arbitrarily pre-reward them for being the kind of agents who one-box. *Furthermore*, it's claimed that the behaviour which is thereby arbitrarily pre-rewarded is irrational, because it amounts to managing the news. 
\n\nThe sense in which one-boxing is said to be irrational news-management is that doing so will give you evidence that you have been pre-rewarded, but won't causally affect the contents of the box – if you're an evidential agent, and have been pre-rewarded as such, you would still get the \\$1m if you were to miraculously, unforeseeably switch to two-boxing; and if you're a causal agent, and have been pre-punished as such, you would still not get the \\$1m if you were to miraculously, unforeseeably switch to one-boxing. Agents of the kind that one-box really do do better, but once you've been rewarded for being that kind of person you may as well act contrary to your kind and two-box anyway, despite the negative news value of doing so.', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '1', maintainerCount: '1', userSubscriberCount: '0', lastVisit: '', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'BenPlommer' ], childIds: [], parentIds: [ 'ldt_intro_phil' ], commentIds: [], questionIds: [], tagIds: [], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '22255', pageId: '817', userId: 'BenPlommer', edit: '1', type: 'newEdit', createdAt: '2017-03-07 21:48:47', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'false', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }