Comment by KenziAmodei on "efficiency" (2015-06-21):

"Boundedly rational" means rational even when you don't have infinite computing power? "Naturalistic" refers to naturalized induction, where you're not a Cartesian dualist who thinks your processes can't be messed with by stuff in the world, and you're also not just thinking of yourself as a little black dot in the middle of Conway's Game of Life? Google says an economic agent is one who has an impact on the economy by buying, selling, or trading; I assign 65% to that being roughly the meaning in use here.

Somehow the epistemic efficiency thing reminds me of the halting problem: whatever we try to do, it can just do it more. Or... actually it reminds me more of the reverse, as if it had solved the halting problem on us. Apologies for abusing the technical terms.

So an epistemically efficient agent, for example, has already overcome all the movie pitfalls of "not being able to understand the human drive for self-sacrifice," or love, etc.

Is there an analogue of efficient markets for instrumental efficiency? Some sort of master-strategy-outputting process that exists (or plausibly exists in at least some special cases) in our world? Maybe Deep Blue at chess? Google Maps for driving directions (for the most part)? *reads to next paragraph* Well, I'm not sure whether to update against Google Maps being an example, given that it isn't mentioned in the "instrumentally efficient agents are presently unknown" section.

That said, "outside very limited domains": I guess "the whole stock market, mostly" is a fair bit broader a domain than "chess" or even "driving directions". Ah, I see; so even though chess programs are overall better than humans, they're not yet hitting the "every silly-looking move is secretly brilliant" bar. And that's *definitely* not true of Google Maps: if it looks like it's telling you to do something stupid, you should put something like 40% on it in fact being stupid.
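(Trying to make that bar concrete for myself: below is a toy numerical sketch, with the Gaussian setup and all the numbers invented by me rather than taken from the page. An agent that reports the exact posterior mean given a superset of my information passes the bar: even in the cases where its answer looks too high to me, its average error is still zero, so my disagreement predicts nothing about the direction of its mistakes. A Google-Maps-grade agent that just blurts out its own raw signal fails: when its answer looks too high to me, it really is too high on average.)

```python
import random

random.seed(0)

N = 200_000
SIGMA_S = 1.0    # noise on my signal (the efficient agent sees this signal too)
SIGMA_T = 0.25   # noise on the agent's extra, sharper signal

eff_err, naive_err = [], []   # agent errors in the cases that "look too high" to me

for _ in range(N):
    v = random.gauss(0, 1)               # true quantity, standard normal prior
    s = v + random.gauss(0, SIGMA_S)     # my signal
    t = v + random.gauss(0, SIGMA_T)     # the agent's extra signal

    m = s / (1 + SIGMA_S**2)             # my posterior mean, given s alone

    # Efficient agent: exact posterior mean given everything it knows (s and t).
    tau = 1 + 1 / SIGMA_S**2 + 1 / SIGMA_T**2
    a_eff = (s / SIGMA_S**2 + t / SIGMA_T**2) / tau

    a_naive = t                          # "Google-Maps-grade": reports its raw signal

    if a_eff > m:                        # its answer looks too high to me...
        eff_err.append(a_eff - v)        # ...but which way is it actually wrong?
    if a_naive > m:
        naive_err.append(a_naive - v)

print("efficient agent, mean error when it looks too high: %+.3f"
      % (sum(eff_err) / len(eff_err)))       # ~ +0.000: my suspicion is no evidence
print("naive agent,     mean error when it looks too high: %+.3f"
      % (sum(naive_err) / len(naive_err)))   # clearly positive: my suspicion pays off
```

So the bar isn't "better than me on average" (both toy agents are); it's "my disagreement carries zero information about the direction of its error".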
Got it.

I can't tell if I should also be trying to think about whether there's a reasonable de