{
  localUrl: '../page/moral_uncertainty.html',
  arbitalUrl: 'https://arbital.com/p/moral_uncertainty',
  rawJsonUrl: '../raw/7s2.json',
  likeableId: '0',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: 'moral_uncertainty',
  edit: '1',
  editSummary: '',
  prevEdit: '0',
  currentEdit: '1',
  wasPublished: 'true',
  type: 'wiki',
  title: 'Moral uncertainty',
  clickbait: 'A meta-utility function in which the utility function, as usually considered, takes on different values in different possible worlds, potentially distinguishable by evidence.',
  textLength: '2104',
  alias: 'moral_uncertainty',
  externalUrl: '',
  sortChildrenBy: 'likes',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'EliezerYudkowsky',
  editCreatedAt: '2017-02-08 04:40:43',
  pageCreatorId: 'EliezerYudkowsky',
  pageCreatedAt: '2017-02-08 04:40:43',
  seeDomainId: '0',
  editDomainId: 'EliezerYudkowsky',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'false',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '94',
  text: '[summary: "Moral uncertainty" in the context of AI refers to an agent with an "uncertain utility function". That is, we can view the agent as pursuing a [1fw utility function] that takes on different values in different subsets of possible worlds.\n\nFor example, an agent might have a [meta_utility meta-utility function] saying that eating cake has a utility of €8 in worlds where Lee Harvey Oswald shot John F. Kennedy and that eating cake has a utility of €10 in worlds where it was the other way around.  This agent will be motivated to inquire into political history to find out which utility function is probably the 'correct' one (relative to this meta-utility function).]\n\n"Moral uncertainty" in the context of AI refers to an agent with an "uncertain utility function".  That is, we can view the agent as pursuing a [1fw utility function] that takes on different values in different subsets of possible worlds.\n\nFor example, an agent might have a [meta_utility meta-utility function] saying that eating cake has a utility of €8 in worlds where Lee Harvey Oswald shot John F. Kennedy and that eating cake has a utility of €10 in worlds where it was the other way around.  This agent will be motivated to inquire into political history to find out which utility function is probably the 'correct' one (relative to this meta-utility function), though it will never be [4mq absolutely sure].\n\nMoral uncertainty must be resolvable by some conceivable observation in order to function as uncertainty.  Suppose, for example, that an agent's probability distribution $\Delta U$ over the 'true' utility function $U$ asserts a dependency on a fair quantum coin that was flipped inside a sealed box which was then destroyed by explosives: the utility function is $U_1$ over outcomes in worlds where the coin came up heads, and $U_2$ in worlds where it came up tails.  If the agent thinks it has no way of ever figuring out what happened inside the box, it will thereafter behave as if it had a single, constant, certain utility function equal to $0.5 \cdot U_1 + 0.5 \cdot U_2.$',
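The `text` field above describes an agent whose utility function is itself uncertain. The sketch below is a minimal illustration of that setup, not anything from the original page; the names (`expected_utility`, `update`, `U1`, `U2`) are invented around the cake example. It shows an agent scoring an outcome by marginalizing over its credence in candidate utility functions, collapsing to the fixed mixture $0.5 \cdot U_1 + 0.5 \cdot U_2$ when nothing can shift that credence, and changing its weights only when a Bayesian update on observable evidence is possible.

```python
# A minimal sketch (illustrative names throughout) of an agent with an
# "uncertain utility function": it holds a credence over candidate
# utility functions and scores outcomes by the expectation
# sum_i P(U_i) * U_i(outcome).  If no observation can move the credence,
# this is behaviorally identical to maximizing the single mixture
# utility 0.5*U_1 + 0.5*U_2 from the destroyed-coin example.

from typing import Callable, Dict

Outcome = str
UtilityFn = Callable[[Outcome], float]


def expected_utility(outcome: Outcome,
                     credence: Dict[str, float],
                     utility_fns: Dict[str, UtilityFn]) -> float:
    """Marginalize over moral uncertainty: E[U(outcome)] under the
    agent's current credence in each candidate utility function."""
    return sum(p * utility_fns[name](outcome)
               for name, p in credence.items())


def update(credence: Dict[str, float],
           likelihood: Dict[str, float]) -> Dict[str, float]:
    """Bayes update of the credence over utility functions, given the
    likelihood of some observation under each hypothesis."""
    unnorm = {k: credence[k] * likelihood[k] for k in credence}
    z = sum(unnorm.values())
    return {k: v / z for k, v in unnorm.items()}


# Two candidate utility functions from the cake example:
# U1 says cake is worth 8, U2 says cake is worth 10.
utility_fns: Dict[str, UtilityFn] = {
    "U1": lambda o: {"cake": 8.0, "no_cake": 0.0}[o],
    "U2": lambda o: {"cake": 10.0, "no_cake": 0.0}[o],
}

# 50/50 credence, as with the fair quantum coin in the sealed box.
credence = {"U1": 0.5, "U2": 0.5}

# With unresolvable uncertainty the agent acts on the fixed mixture:
# E[U(cake)] = 0.5*8 + 0.5*10 = 9.
print(expected_utility("cake", credence, utility_fns))   # 9.0

# If some observation *could* shift the credence (e.g. historical
# evidence about which world the agent is in), the weights move, which
# is what makes investigating worthwhile for the agent.
print(update(credence, {"U1": 0.9, "U2": 0.1}))  # credence shifts toward U1
```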
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '1',
  maintainerCount: '1',
  userSubscriberCount: '0',
  lastVisit: '',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don't have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don't have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can't comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'EliezerYudkowsky'
  ],
  childIds: [
    'ideal_target'
  ],
  parentIds: [
    'preference_framework'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [
    'start_meta_tag'
  ],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21967',
      pageId: 'moral_uncertainty',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newChild',
      createdAt: '2017-02-08 04:56:28',
      auxPageId: 'ideal_target',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21964',
      pageId: 'moral_uncertainty',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newParent',
      createdAt: '2017-02-08 04:40:53',
      auxPageId: 'preference_framework',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21965',
      pageId: 'moral_uncertainty',
      userId: 'EliezerYudkowsky',
      edit: '0',
      type: 'newTag',
      createdAt: '2017-02-08 04:40:53',
      auxPageId: 'start_meta_tag',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '21962',
      pageId: 'moral_uncertainty',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newEdit',
      createdAt: '2017-02-08 04:40:43',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'true',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}