{ localUrl: '../page/1hf.html', arbitalUrl: 'https://arbital.com/p/1hf', rawJsonUrl: '../raw/1hf.json', likeableId: '455', likeableType: 'page', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], pageId: '1hf', edit: '1', editSummary: '', prevEdit: '0', currentEdit: '1', wasPublished: 'true', type: 'comment', title: '"Act-based is a more general..."', clickbait: '', textLength: '1301', alias: '1hf', externalUrl: '', sortChildrenBy: 'recentFirst', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'PaulChristiano', editCreatedAt: '2015-12-30 02:03:42', pageCreatorId: 'PaulChristiano', pageCreatedAt: '2015-12-30 02:03:42', seeDomainId: '0', editDomainId: 'EliezerYudkowsky', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '0', isEditorComment: 'false', isApprovedComment: 'true', isResolved: 'false', snapshotText: '', anchorContext: '', anchorText: '', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '828', text: 'Act-based is a more general designation, that includes e.g. imitation learning (and value learning where the agent learns short-term instrumental preferences of the user rather than long-term preferences).\n\nSo you see the difference as whether the programmers have to actually supply the short-term objective, or whether the AI learns the short-term objective they would have defined / which they would accept/prefer?\n\nThe distinction seems to buy you relatively little safety at a great cost (basically taking the system from "maybe it's good enough?" to "obviously operating at an incredible disadvantage"). You seem to think that it buys you much more safety than I do.\n\nIt seems like the main extra risk is from the AI making bad predictions about what the humans would do. Mostly this seems like it will lead to harmless failures if the humans behave responsibly, and it requires only very weak models of human behavior to avoid most of the really bad failures. The main new catastrophic risk I see is the agent thinking it is in a simulation. Are there other similar problems for the act-based approach?\n\n(If we use approval-direction instead of imitation then we may introduce additional concerns depending on how we set it up. But those seem orthogonal to the actual involvement of the human.)', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '0', maintainerCount: '0', userSubscriberCount: '0', lastVisit: '2016-02-25 04:36:01', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'PaulChristiano' ], childIds: [], parentIds: [ '1gj', 'task_agi' ], commentIds: [], questionIds: [], tagIds: [], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4795', pageId: '1hf', userId: 'PaulChristiano', edit: '1', type: 'newEdit', createdAt: '2015-12-30 02:03:42', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4790', pageId: '1hf', userId: 'PaulChristiano', edit: '0', type: 'newParent', createdAt: '2015-12-30 01:54:40', auxPageId: 'task_agi', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '4792', pageId: '1hf', userId: 'PaulChristiano', edit: '0', type: 'newParent', createdAt: '2015-12-30 01:54:40', auxPageId: '1gj', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'false', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }