{
  localUrl: '../page/82.html',
  arbitalUrl: 'https://arbital.com/p/82',
  rawJsonUrl: '../raw/82.json',
  likeableId: '2421',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: '82',
  edit: '1',
  editSummary: '',
  prevEdit: '0',
  currentEdit: '1',
  wasPublished: 'true',
  type: 'comment',
  title: '"Paul, I don't disagree that..."',
  clickbait: '',
  textLength: '1072',
  alias: '82',
  externalUrl: '',
  sortChildrenBy: 'recentFirst',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'EliezerYudkowsky',
  editCreatedAt: '2015-06-19 19:18:34',
  pageCreatorId: 'EliezerYudkowsky',
  pageCreatedAt: '2015-06-19 19:18:34',
  seeDomainId: '0',
  editDomainId: 'EliezerYudkowsky',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'true',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '1626',
  text: 'Paul, I don\'t disagree that we want the AI to think whatever thought it ought to think.  I\'m proposing a chicken-and-egg problem where the AI can\'t figure out which thoughts constitute mindcrime, without already committing mindcrime.  I think you could record a lot of English pontification from me and still have a non-person-simulating AI feeling pretty confused about what the heck I meant or how to apply it to computer programs.  Can you give a less abstract view of how you think this problem should be solved?  What human-understanding and mindcrime-detection abilities do you think the AI can develop, in what order, without committing lots of mindcrime along the way?  Sure, given infinite human understanding, the AI can detect mindcrime very efficiently, but the essence of the problem is that it seems hard to get infinite human understanding without lots of mindcrime being committed along the way.  So what is it you think can be done instead, that postulates only a level of human understanding that you think can be done knowably without simulating people?',
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '0',
  maintainerCount: '0',
  userSubscriberCount: '0',
  lastVisit: '2016-02-26 20:11:53',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don\'t have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don\'t have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can\'t comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'EliezerYudkowsky'
  ],
  childIds: [],
  parentIds: [
    'mindcrime',
    '78'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '266',
      pageId: '82',
      userId: 'AlexeiAndreev',
      edit: '1',
      type: 'newParent',
      createdAt: '2015-10-28 03:46:51',
      auxPageId: 'mindcrime',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '16',
      pageId: '82',
      userId: 'AlexeiAndreev',
      edit: '1',
      type: 'newParent',
      createdAt: '2015-10-28 03:46:51',
      auxPageId: '78',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '1270',
      pageId: '82',
      userId: 'EliezerYudkowsky',
      edit: '1',
      type: 'newEdit',
      createdAt: '2015-06-19 19:18:34',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'false',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}