{
"resetEverything": true,
"user": {
"id": "",
"firstName": "",
"lastName": "",
"lastWebsiteVisit": "",
"isSubscribed": false,
"domainMembershipMap": {},
"fbUserId": "",
"email": "",
"isAdmin": false,
"emailFrequency": "",
"emailThreshold": 0,
"ignoreMathjax": false,
"showAdvancedEditorMode": false,
"isSlackMember": false,
"analyticsId": "aid:dhrxAIni4YdTngwDQJOTmfI2gyP5lR9mP8L+kL1XU8I",
"hasReceivedMaintenanceUpdates": false,
"hasReceivedNotifications": false,
"newNotificationCount": 0,
"newAchievementCount": 0,
"maintenanceUpdateCount": 0,
"invitesClaimed": [],
"mailchimpInterests": {},
"continueBayesPath": null,
"continueLogPath": null
},
"pages": {
"3": {
"likeableId": "1919",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "3",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "group",
"title": "Paul Christiano",
"clickbait": "",
"textLength": 106,
"alias": "PaulChristiano",
"externalUrl": "",
"sortChildrenBy": "alphabetical",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "3",
"editCreatedAt": "2015-09-04 16:14:58",
"pageCreatorId": "3",
"pageCreatedAt": "2015-09-04 16:14:58",
"seeDomainId": "0",
"editDomainId": "705",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 187,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"14z": {
"likeableId": "139",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "14z",
"edit": 7,
"editSummary": "updating",
"prevEdit": 6,
"currentEdit": 7,
"wasPublished": true,
"type": "wiki",
"title": "Arbital domain",
"clickbait": "What is a domain? Why is it important?",
"textLength": 1145,
"alias": "arbital_domain",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-21 23:31:57",
"pageCreatorId": "1",
"pageCreatedAt": "2015-10-10 22:31:00",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 83,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"15w": {
"likeableId": "164",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "15w",
"edit": 5,
"editSummary": "",
"prevEdit": 4,
"currentEdit": 5,
"wasPublished": true,
"type": "wiki",
"title": "Machine Intelligence Research Institute",
"clickbait": "Where to work if you're doing more formal or technical work on AI safety, of a kind not easily milked for publications.",
"textLength": 799,
"alias": "MIRI",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2015-12-23 21:16:07",
"pageCreatorId": "1",
"pageCreatedAt": "2015-10-26 22:59:19",
"seeDomainId": "0",
"editDomainId": "7",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 165,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1sh": {
"likeableId": "733",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"pageId": "1sh",
"edit": 3,
"editSummary": "",
"prevEdit": 2,
"currentEdit": 3,
"wasPublished": true,
"type": "wiki",
"title": "Paul Christiano's AI control blog",
"clickbait": "Speculations on the design of safe, efficient AI systems.",
"textLength": 186,
"alias": "paul_ai_control",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "3",
"editCreatedAt": "2016-02-03 03:19:41",
"pageCreatorId": "3",
"pageCreatedAt": "2016-01-30 00:23:25",
"seeDomainId": "0",
"editDomainId": "705",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 79,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1tp": {
"likeableId": "768",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1tp",
"edit": 3,
"editSummary": "",
"prevEdit": 2,
"currentEdit": 3,
"wasPublished": true,
"type": "wiki",
"title": "Delegating to a mixed crowd",
"clickbait": "",
"textLength": 4502,
"alias": "Delegating_mixed_crowd",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "3",
"editCreatedAt": "2016-02-20 02:43:53",
"pageCreatorId": "3",
"pageCreatedAt": "2016-02-02 00:41:26",
"seeDomainId": "0",
"editDomainId": "705",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 14,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1tq": {
"likeableId": "769",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1tq",
"edit": 4,
"editSummary": "",
"prevEdit": 3,
"currentEdit": 4,
"wasPublished": true,
"type": "wiki",
"title": "Learning and logic",
"clickbait": "",
"textLength": 24677,
"alias": "learning_logic",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "3",
"editCreatedAt": "2016-03-04 00:30:26",
"pageCreatorId": "3",
"pageCreatedAt": "2016-02-02 01:36:27",
"seeDomainId": "0",
"editDomainId": "705",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 27,
"text": "\n\nIn most machine learning tasks, the learner maximizes a concrete, empirical performance measure: in supervised learning the learner maximizes its classification accuracy, in reinforcement learning the learner maximizes its reward. In order to maximize this reward, the learner has to be able to observe or compute it.\n\nBut sometimes we want our learner to discover some interesting fact about the world — e.g. to find the mass of the Higgs boson — and we have no external check to tell us whether it has succeeded.\n\nSolving problems where we can’t tie success directly to observations seems quite difficult at the moment. And we can’t just throw bigger computers and more data at them without doing a bunch of _ad hoc_ thinking or finding some new insight. So, relatively speaking, these tasks seem to be getting harder over time.\n\nFrom an AI control perspective this is an important problem. In the long run, we really want to use machines for tasks where we can’t define success as a simple function of observations.\n\n### Where logic comes in\n\nReasoning about logic is one of the simplest possible examples of this challenge. Logic lets us state a goal very precisely, in a very simple language with very simple semantics. Yet, for now, I don’t think that we have effective techniques for pursuing symbolically defined goals.\n\n### The challenge\n\nAs a toy example, consider a program _f_ that is simple to define but prohibitively difficult to evaluate. _f_ takes two binary arguments, and outputs either 0 or 1. Given an input _x_, we would like to find an input _y_ such that _f_(_x, y_) = 1.\n\nThis problem is very closely related to estimating the probabilities of the form “_f_(_x, y_) = 1.”\n\nIf _f_ is easy to evaluate, then we can treat this as a standard reinforcement learning problem. 
But as _f_ gets more complicated, this becomes impractical, and we need some new technique.\n\nI don’t know any reasonable algorithm for this problem.\n\nNote that the _goal_ is entirely logical, but the actual problem need not be purely abstract. Even an agent with a simple logical goal can benefit from having lots of data about the world. For a very simple example, you might learn that a calculator is a useful guide to facts about arithmetic. I think that using such observations is quite important.\n\n### Standards\n\nWhat do we mean by “reasonable algorithm”? We don’t necessarily need to pin this down — better algorithms are better— but if I want to argue that there is an important gap in our knowledge, I need to say something about what we don’t yet know.\n\nI’m interested in _frameworks_ for symbolic reasoning, that combine available building blocks to solve the problem. Then the goal is scalable frameworks that can effectively exploit continuing improvements in optimization algorithms, or increased hardware, or conceptual advances in AI.\n\nSome desiderata:\n\n- Whatever level of performance on logical tasks we can achieve implicitly in the process of solving RL or supervised learning problems, we ought to be able to achieve similar performance on the logical problems themselves. For example, if our reinforcement learner can form a plan in an environment, then our logical reasoner ought to be able to solve an analogous constraint satisfaction problem. If our reinforcement learner can argue persuasively that a theorem is true in order to win a reward, then our logical reasoner ought to be able to assign high probability to it.\n- Similarly, if we can achieve human-level performance on all RL problems, including complex problems requiring the full range of human abilities, we ought to be able to compute probabilities that are as accurate as those assigned by a human.\n\nThese standards are very imprecise (e.g. 
what does it mean for an RL problem to “implicitly” require solving some logical task?), but hopefully it gives a sense of what I am after.\n\nI think that we can’t meet this requirement yet, certainly not in a way that will continue to hold as underlying optimization algorithms and computational hardware improve. (See the next section on inadequate approaches.)\n\n### Why logic is especially interesting\n\nLogic isn’t just the simplest toy example; it is also an extremely expressive language. With enough additional work I think that we might be able to [define a reasonable proxy for our actual preferences](https://ordinaryideas.wordpress.com/2012/04/21/indirect-normativity-write-up/) as a logical expression. (Though like most people I expect it will be practically easier to use a language that can easily represent things like “the user,” which are kind of a mouthful in formal logic.) The problem is “merely” that the logical definition is hopelessly complex.\n\nWhether or not you buy this particular argument, I think that much of the “hard part” of reasoning symbolically already appears in the context of reasoning about very complex logical expressions. Thinking about logic simplifies the general problem of symbolic reasoning, by providing us with semantics “for free.” But I think we are still left with a very important problem.\n\nSome inadequate responses to the challenge\n==========================================\n\n### Logic as a representation\n\nI can already build a theorem-proving system, that analyzes a sentence φ by searching for proofs of φ. 
I can maybe even [up the ante](https://intelligence.org/files/Non-Omniscience.pdf) by assigning probabilities to sets of sentences, and defining procedures for updating these probabilities on “logical observations.”\n\nThese approaches lag radically behind the current state of the art for supervised learning.\n\nOne basic problem is that logic is the language of our problem statement, and logical deduction is indeed powerful, but it is often a _terrible_ internal representation. For example, if I am told some facts about a linear order on X, Y, Z, I should probably represent those facts by putting X, Y, Z on a line rather than by explicitly representing every inequality.\n\nWe would really like to design algorithms that can efficiently learn whatever internal representation is most effective. Similarly, we’d like to allow our algorithms to learn what approach to logical inference is most appropriate. And in general, approaches which embed logical structure via hand-coded rules (and then lean on those rules to actually do meaningful computational work) look like they may be on the wrong side of history.\n\nMoreover, if we are searching for a scalable framework, these approaches obviously won’t cut it. At best we will end up with a “race” between algorithms for logical reasoning and other AI systems.\n\n### Transfer learning\n\nA second approach is to treat logical reasoning as a supervised learning problem. That is, we can sample sentences φ, ask our learner to guess whether they are true, and then adjust the model to assign higher probability to the correct guess (e.g. to maximize log score).\n\nThe key difficulty with this approach is that we can only train on sentences φ which are sufficiently simple that we can actually tell whether they are true or false.\n\nIn order to apply the learned model to complex sentences, we need to rely on a strong form of transfer learning. 
Namely, we need to take a model which has had _zero_ training on sentences that are too-complex-to-evaluate, and trust it to perform well on such sentences. I am somewhat skeptical about expecting learning algorithms to reliably generalize to a new domain where it is impossible to even tell whether they are generalizing correctly.\n\nIdeally we would be able to train our algorithm on exactly the kinds of sentences that we actually cared about. But this easy vs. hard distinction probably means that we would have to train our system exclusively on much easier toy samples.\n\nI think that this kind of generalization is plausible for simple functions (e.g. multiplication). But assigning probabilities to logical sentences is definitely _not_ a simple function; it draws on a wide range of cognitive capabilities, and the actual estimator is extremely complex and messy. I would be completely unsurprised to find that many models which perform well on easy-to-assess sentences have pathological behavior when extended to very challenging sentences.\n\nAt some point I might be convinced that AI control inherently needs to rest on assumptions about transfer learning — that we have no hope but to hope that learned functions generalize in the intended way to unforeseen situations. But I haven’t yet given up — for now, I still think that we can solve the problem without any leaps of faith.\n\nPragmatically, if we wanted to train a function to estimate the truth of complex sentences, we might train it on our “best guesses” about the truth of complex sentences that we couldn’t answer exactly. But we’ll end up with a supervised learning system that estimates our best guesses about logical facts. This doesn’t really buy us anything from a control perspective.\n\nA preliminary approach\n======================\n\nI’m going to describe an extremely preliminary approach to this problem. 
It seems far from satisfactory; my purpose is mostly to raise the question and show that we can get at least a little bit of traction on it.\n\n### The scheme\n\nWe’ll train a function _P_ to assign probabilities to logical sentences. For simplicity we’ll work with a language that has constant, function, and relation symbols, variables, and no quantifiers. Variables are assumed to be universally quantified.\n\n(I’m not really going to talk about how the function is trained or what class of models is used. I just want to use _P_ as a black box for the online learning problem I’m going to describe. For concreteness you could imagine training a neural network to recursively build a constant-sized vector representation of formulas or terms by combining representations for each subexpression. Probably an algorithm which could actually handle this problem would need to advance the state of the art in several important ways.)\n\nAt the same time we will train a reinforcement learner _A_ to produce “challenges” to _P_: _A_’s goal is to identify inconsistencies in _P_’s probabilities.\n\nI’ll also assume we have some _observations_ φᵢ, logical facts which are observed to be true. Over time the set of observations will grow.\n\nI’ll allow four kinds of challenges from _A_, corresponding to four kinds of possible inconsistencies.\n\n1. Given any pair of sentences φ, ψ, a consistent assignment of probabilities to sentences should have: _P_(φ) = _P_(φ ∧ ψ) + _P_(φ ∧_¬_ψ).\n2. Given any sentence φ with a free variable _x_, and any term _t_, we should have _P_(φ ∧ φ[_x_ := _t_]) = _P_(φ).\n3. Given any sentence φ and a sentence ψ which is “obviously” equivalent to φ, we should have _P_(φ) = _P_(ψ). I won’t define “obviously,” but we could use the notion of _trivial equivalence_ from [here](https://intelligence.org/files/Non-Omniscience.pdf).\n4. 
Given any observation φᵢ, we should have _P_(φᵢ) = 1.\n\n_A_’s goal is to produce a pair of sentences, or a sentence and a term, such that _P_ violates one of these constraints.\n\nIt turns out that these constraints are universal: If _P_ doesn’t violate any of these constraints, then we can prove that _P_’s assignments actually correspond to some distribution over models consistent with observation. In reality, _P_ will never converge to a distribution that satisfies all of these constraints.\n\nFormally, _A_ and _P_ play the following game:\n\n- _A_ chooses a consistency check from one of categories 1–4 above. We may put some limits on what sentences it can use in a consistency check — for example, to implement curriculum learning, we may initially limit _A_ to providing short sentences.\n- _P_ assigns probabilities to each sentence referenced in the consistency check. (The same program is used to assign a probability to each sentence. Intuitively, separate copies of _P_ independently assign a probability to each sentence.)\n- If _P_’s probabilities are inconsistent, then we penalize _P_ (and reward _A_). A natural choice for penalty is the total KL divergence from _P_’s probabilities to the closest consistent set of probabilities.\n\n_A_ is trained to maximize _P_’s penalty in the next round (i.e. without concern for effects on future rounds), and _P_ is trained to minimize its penalty.\n\n### Example: Only observations\n\nIf _A_ only ever makes challenges of type 4 — enforcing consistency with an observation — then _P_ is free to ignore logical structure. In this case, the procedure corresponds to supervised learning. So at least we have successfully subsumed the simple supervised learning approach.\n\nAll of the system’s ability to reason about complex sentences is coming from the consistency checks.\n\nThe consistency mechanism is more general than the observations. 
For example, by carrying out the steps of a computation one by one, _A_ can force _P_ to be correct about the result of that computation. The observations are only relevant if either there are constant symbols in the language, or we are relying on the environment to do interesting computations.\n\nSo, even if we left out the observations, as long as _A_ followed an appropriate strategy, this system would still subsume the simple supervised learning approach. (_A_’s strategy is obviously very important, see the section “Problem: Relevance” below.)\n\n### Example: No structure\n\n_P_ is free to ignore all structure of logical sentences, and only use the constraints implied by _A_’s challenges. For example, _P_ could use the following procedure:\n\nNotice that each constraint is linear, so that the set of constraints appearing in _A_’s challenges form a polytope (which is simply the whole space [0, 1] in any coordinate that hasn’t yet appeared in a constraint). _P_ can track each of these constraints, and in each round output the appropriate coordinate of the centroid of this polytope.\n\n(This basically looks like constraint generation, though it’s not going to go anywhere good ever because _P_ can never converge — see the next section.)\n\nOn this model, _P_ and _A_ together are essentially doing elementary logical inference. The whole definition of the system resides in _A_’s choices about what to explore, which is playing the role of the proposer in a proof search.\n\n### Problem: Relevance\n\n\nThere will always be inconsistencies in _P_’s probabilities, and _A_ will always be able to find some of them. So _P_ can never really win the game, and the training will continuously patch new problems identified by _A_ rather than ever converging. Our only guarantee will be that _P_ is consistent _for the kinds of questions that A prefers to ask_.\n\nSo it really matters that _A_ asks relevant questions. 
But so far we haven’t said anything about that, we have just given _A_ the goal of identifying inconsistencies in _P_’s view. I think that this is the most important deficiency in the scheme — without correcting it, the whole thing is useless.\n\nFor simplicity, let’s assume that we are ultimately interested in a particular sentence φ. We would like _A_ to focus on questions that are most relevant to φ, such that if _P_ is consistent on these sentences then it is especially likely to have a reasonable view about φ.\n\nA crude approach is to simply reward _A_ for asking questions which are correlated with φ (according to _P_). For example, when _P_ is penalized on some sentence we can reward _A_ according to the product \\[how much _P_’s beliefs about ψ had to move to be consistent] \\[the mutual information between ψ and φ, according to _P_]. The hope is that questions which are relevant to φ will be correlated with φ, and so _A_ will focus its attention on _P_’s most relevant errors. But there is no real principled reason to think that this would work.\n\nAlternatively, we could train a relevance function _V_ in parallel with _A_ and _P_. The simplest instantiation of this idea might be to require _V_(φ) = 1, and to require that _V_(ψ) be large whenever ψ is logically related, or perhaps has high mutual information under _P_, to another sentence with a high relevance. (and to otherwise exert downward pressure on _V_). We could then reward _A_ for choosing highly relevant sentences. This has a similar intuitive motivation, but it also lacks any principled justification.\n\nAnother crude measure is to reward _A_ for identifying errors involving simple sentences, so that _P_ will be roughly consistent whenever we talk about simple sentences, and it will “notice” any arguments that involve only simple sentences. 
This can’t be a substitute for relevance though, since it requires _P_ to notice _all_ of these arguments rather than allowing it to focus on the relevant ones.\n\nI don’t see any easy way to deal with this problem. That may mean that this approach to logical reasoning is doomed. Or it just might mean that we need a clever idea — I think there is a lot to try.\n\n### Problem: going beyond logic\n\n\nIn this scheme, consistency checks are limited to logical consistency conditions. In some sense these conditions are universal if we only care about finite objects. But they may be less powerful than other kinds of inference.\n\nOf course, _A_ and _P_ can learn strategies that reflect more complex regularities. For example, _P_ can learn that probabilistic methods usually work, and thereafter use probabilistic methods to guess whether a sentence is true. And _A_ can learn that probabilistic methods usually work, and thereafter use them to identify probable inconsistencies in _P_’s views.\n\nBut these other methods of inference can’t be used to generate extra constraints on _P_’s beliefs, and that may mean that the resulting beliefs are less accurate than human beliefs (even if _P_ is much better at reinforcement learning than a human).\n\nIt’s not clear whether this is a big problem.\n\nTo see an example where this looks like it could be a problem, but actually isn’t: consider an agent reasoning about arithmetic without logical induction. Suppose that _P_ assigns a high probability to ∀_n_: φ(_n_) → φ(_n_+1), and assigns a high probability to φ(0), yet assigns a low probability to φ(1000000). At face value, _A_ has no way to prove that _P_ is inconsistent. 
Thus _P_ might be able to persist in these inconsistent beliefs, even if _P_ and _A_ are both good enough learners that they would be able to figure out the induction is useful.\n\nBut _A_ can use induction in order to identify a problem in _P_’s beliefs, by doing a binary search to find a point where _P_ has different beliefs about φ(_n_) and φ(_n_+1), even conditioned on ∀_n_: φ(_n_) → φ(_n_+1).\n\nSo in fact we didn’t need to include induction as an inference rule for _A_, it falls naturally out of the rules of the game. (You might complain that the size of _P_’s inconsistency is significantly decreased, but a more clever strategy for _A_ can ensure that the reduction is at-most-linear.)\n\nIt seems like this is probably a happy coincidence, distinctive to induction. In general, we can probably find axioms such that _P_ doesn’t lose anything by simply violating them. Even if _P_ “can tell” that such an axiom is true, but has no incentive to assign it a high probability. Similarly, using that axiom _A_ can identify likely inconsistencies in _P_’s beliefs, but has no way to quickly demonstrate an inconsistency.\n\nAnd of course, _A_ and _P_ probably make judgments that don’t correspond to any axioms at all, e.g. based on statistical regularities or alternative representations that give probabilistic clues.\n\nIn some sense, the problem is that we have a “dumb” arbiter, who only accepts proofs of inconsistency of a very restricted kind. If we want our system to learn to give human-level judgments, we need to either:\n\n- Show that such a “dumb” arbiter is sufficient, and provides essentially the maximal possible pressure on _P_’s beliefs.\n- Change the setup to make the arbiter’s job easier, so that a dumb arbiter can do it after all.\n- Describe a more clever set of consistency checks which is in some sense universal and lets _A_ make any kind of inconsistency argument that _A_ might want to make. 
This is in some sense analogous to specifying _P_ directly rather than using learning, but maybe the arbiter’s job is much easier than the learner’s job.\n- Allow the arbiter itself to learn without compromising correctness.\n\nA natural way to get around this problem is to use human evaluations to train an arbiter to evaluate consistency. This also allows us to give _P_ a much larger class of questions (any questions that are meaningful to a human). In some sense this seems quite promising, but it introduces a few difficulties:\n\n- From a safety perspective, if _A_ is very powerful then we have reintroduced the kind of adversarial dynamic that symbolic reasoning may have let us avoid (since now _A_ is incentivized to manipulate or deceive the human into judging in its favor). This might not be a critical failure; for example, a weak _A_ and _P_ can be used to build a more powerful human-aligned agent (which can then play the role of _A_ or _P_ in a still more powerful system, _etc._)\n- Practically, logic is convenient because consistency is all-or-nothing, and so we don’t have to worry about quantitatively weighing up different partial inconsistencies. Once we move to a more realistic domain, this becomes a critical issue. It looks quite challenging.\n\nThis problem is not as clear a deal-breaker as the issue with relevance discussed in the last section. But it seems like a more fundamental problem, and so maybe worth attacking first.\n\nRelated work\n============\n\nI am not aware of any existing work which tries to handle logical reasoning in what I’m calling a “scalable” way.\n\nThere is a literature on probabilistic logical reasoning, but mostly it fits in the category of “logic as a representation” above. This work mostly isn’t aiming to build systems that scale as effectively as supervised learning. The flavor of the work ends up being very different.\n\nThere is a much smaller literature applying machine learning to this kind of logical problem. 
What work there is has been very happy to focus on the supervised learning approach, explicitly restricting attention to “easy” sentences where we can easily compute the ground truth or the quality of a proposed solution.\n\nOne reason for the lack of practical work is that existing machine learning techniques aren’t really strong enough for it to seem worthwhile. My guess is that the situation will change and is already starting to change, but for now there isn’t too much.\n\nResearchers at [MIRI](https://intelligence.org/) have thought about these questions at some length, and I thought about them a few years ago, but from a different angle (and with a different motivation). They have instead been focusing on finding improvements to existing intractable or impractical algorithms. Even in the infinite computing case we don’t have especially good models of how to solve this problem.\n\nI’m now approaching the problem from a different angle, with a focus on efficacy rather than developing a “clean” theory of reasoning under logical uncertainty, for a few reasons:\n\n1. It’s not clear to me there is any clean theory of reasoning under logical uncertainty, and we already have a mediocre theory. It’s no longer obvious what additional theorems we want. This seems bad (though certainly not fatal).\n2. It is pretty clear that there needs to be a more effective approach to symbolic reasoning, if it is to play any practical role in AI systems. So we know what the problem is.\n3. The scalable symbolic reasoning problem looks much more important if AI control becomes a serious issue soon. Trying to solve it also looks like it will yield more useful information (in particular, this is probably the main uncertainty about the role of logic in practical AI systems).\n4. 
Given that we understand the constraints from efficacy, and don’t understand the constraints from having a clean theory, I think that thinking about efficacy is more likely to improve our thinking about the clean theory than vice versa.",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "2016-02-09 06:07:39",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [
"3"
],
"childIds": [],
"parentIds": [
"1sh"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "8262",
"pageId": "1tq",
"userId": "1s6",
"edit": 4,
"type": "newEdit",
"createdAt": "2016-03-04 00:30:26",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "7763",
"pageId": "1tq",
"userId": "1s6",
"edit": 3,
"type": "newEdit",
"createdAt": "2016-02-24 23:26:54",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "6893",
"pageId": "1tq",
"userId": "1s6",
"edit": 2,
"type": "newEdit",
"createdAt": "2016-02-11 23:23:50",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "6080",
"pageId": "1tq",
"userId": "1s6",
"edit": 1,
"type": "newEdit",
"createdAt": "2016-02-02 01:36:27",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "6078",
"pageId": "1tq",
"userId": "1s6",
"edit": 0,
"type": "newParent",
"createdAt": "2016-02-02 01:18:13",
"auxPageId": "1sh",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "6076",
"pageId": "1tq",
"userId": "1s6",
"edit": 0,
"type": "deleteParent",
"createdAt": "2016-02-02 01:18:09",
"auxPageId": "1tp",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "6073",
"pageId": "1tq",
"userId": "1s6",
"edit": 0,
"type": "newParent",
"createdAt": "2016-02-02 01:17:03",
"auxPageId": "1tp",
"oldSettingsValue": "",
"newSettingsValue": ""
}
],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"3d": {
"likeableId": "2273",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "3d",
"edit": 33,
"editSummary": "",
"prevEdit": 32,
"currentEdit": 33,
"wasPublished": true,
"type": "wiki",
"title": "Arbital",
"clickbait": "Arbital is the place for crowdsourced, intuitive math explanations.",
"textLength": 5201,
"alias": "Arbital",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-08-08 16:07:52",
"pageCreatorId": "1",
"pageCreatedAt": "2015-03-30 22:19:47",
"seeDomainId": "0",
"editDomainId": "8",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 2323,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"3hs": {
"likeableId": "2499",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "3hs",
"edit": 19,
"editSummary": "added link to exemplar pages",
"prevEdit": 18,
"currentEdit": 19,
"wasPublished": true,
"type": "wiki",
"title": "Author's guide to Arbital",
"clickbait": "How to write intuitive, flexible content on Arbital.",
"textLength": 4420,
"alias": "author_guide_to_arbital",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-08-08 14:32:40",
"pageCreatorId": "1",
"pageCreatedAt": "2016-05-10 17:55:35",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 313,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4yg": {
"likeableId": "2907",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4yg",
"edit": 9,
"editSummary": "",
"prevEdit": 8,
"currentEdit": 9,
"wasPublished": true,
"type": "wiki",
"title": "Arbital quality",
"clickbait": "Arbital's system for tracking page quality.",
"textLength": 721,
"alias": "arbital_quality",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-16 01:23:21",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-06-30 02:21:25",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 102,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4ym": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4ym",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "wiki",
"title": "Unassessed",
"clickbait": "This page's quality has not been assessed.",
"textLength": 134,
"alias": "unassessed_meta_tag",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-06-30 04:35:45",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-06-30 02:32:42",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 46,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"58l": {
"likeableId": "3060",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "58l",
"edit": 8,
"editSummary": "",
"prevEdit": 6,
"currentEdit": 8,
"wasPublished": true,
"type": "wiki",
"title": "Arbital user groups",
"clickbait": "Users can attain different powers and responsibilities on Arbital.",
"textLength": 2344,
"alias": "arbital_user_groups",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-19 00:24:53",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-07-09 00:06:07",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 95,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
}
},
"edits": {},
"users": {
"1": {
"id": "1",
"firstName": "Alexei",
"lastName": "Andreev",
"lastWebsiteVisit": "2018-02-18 09:35:21",
"isSubscribed": false,
"domainMembershipMap": {}
},
"2": {
"id": "2",
"firstName": "Eliezer",
"lastName": "Yudkowsky",
"lastWebsiteVisit": "2018-12-31 03:31:07",
"isSubscribed": false,
"domainMembershipMap": {}
},
"3": {
"id": "3",
"firstName": "Paul",
"lastName": "Christiano",
"lastWebsiteVisit": "2017-07-07 03:33:20",
"isSubscribed": false,
"domainMembershipMap": {}
},
"1s6": {
"id": "1s6",
"firstName": "Jessica",
"lastName": "Chuan",
"lastWebsiteVisit": "2016-03-05 00:33:18",
"isSubscribed": false,
"domainMembershipMap": {}
},
"1yq": {
"id": "1yq",
"firstName": "Eric",
"lastName": "Bruylant",
"lastWebsiteVisit": "2017-04-14 18:00:22",
"isSubscribed": false,
"domainMembershipMap": {}
}
},
"domains": {
"3": {
"id": "3",
"pageId": "3d",
"createdAt": "2015-03-30 22:19:47",
"alias": "Arbital",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"7": {
"id": "7",
"pageId": "15w",
"createdAt": "2015-10-26 22:59:19",
"alias": "MIRI",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"705": {
"id": "705",
"pageId": "3",
"createdAt": "2015-02-10 23:46:17",
"alias": "PaulChristiano",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
}
},
"masteries": {
"1tq": {
"pageId": "1tq",
"has": false,
"wants": false,
"level": 0,
"updatedAt": ""
}
},
"marks": {},
"pageObjects": {},
"result": {
"primaryPageId": "1tq"
},
"globalData": {
"privateDomain": {
"id": "0",
"pageId": "",
"createdAt": "",
"alias": "",
"canUsersComment": false,
"canUsersProposeComment": false,
"canUsersProposeEdits": false,
"friendDomainIds": []
},
"improvementTagIds": [
"15r",
"15s",
"3rk",
"3wq",
"433",
"4d3",
"4lg",
"4pt",
"4w9",
"4ym",
"54j",
"55s",
"5cb",
"5gr",
"5sv",
"5t7",
"5tb",
"5v6",
"5xq",
"72"
]
}
}