diff --git a/src/docs/book/contributor.book b/src/docs/book/contributor.book
index 10db63c011..1d0386251f 100644
--- a/src/docs/book/contributor.book
+++ b/src/docs/book/contributor.book
@@ -1,36 +1,36 @@
{
- "name": "phabcontrib",
- "title": "Phabricator Contributor Documentation",
- "short": "Phabricator Contributor Docs",
- "preface": "Information for Phabricator contributors and developers.",
+ "name": "contrib",
+ "title": "Phorge Contributor Documentation",
+ "short": "Contributor Docs",
+ "preface": "Information for Phorge contributors and developers.",
"root": "../../../",
"uri.source":
- "https://secure.phabricator.com/diffusion/P/browse/master/%f$%l",
+ "https://we.phorge.it/diffusion/P/browse/master/%f$%l",
"rules": {
"(\\.diviner$)": "DivinerArticleAtomizer"
},
"exclude": [
"(^externals/)",
"(^resources/)",
"(^scripts/)",
"(^src/docs/flavor/)",
"(^src/docs/tech/)",
"(^src/docs/user/)",
"(^support/)",
"(^webroot/rsrc/externals/)"
],
"groups": {
"contrib": {
"name": "Contributor Overview"
},
"detail": {
"name": "Contributing in Detail"
},
"developer": {
"name": "Developer Guides"
},
"standards": {
"name": "Coding Standards"
}
}
}
diff --git a/src/docs/book/flavor.book b/src/docs/book/flavor.book
index 978244f19d..4404e94bf7 100644
--- a/src/docs/book/flavor.book
+++ b/src/docs/book/flavor.book
@@ -1,42 +1,39 @@
{
- "name": "phabflavor",
- "title": "Phabricator Flavor Text",
+ "name": "flavor",
+ "title": "Phorge Flavor Text",
"short": "Flavor Text",
- "preface": "Recommendations, lore, and dark rituals.",
+ "preface": "A collection of short articles which pertain to software development in general, not necessarily to Phorge specifically.",
"root": "../../../",
"uri.source":
- "https://secure.phabricator.com/diffusion/P/browse/master/%f$%l",
+ "https://we.phorge.it/diffusion/P/browse/master/%f$%l",
"rules": {
"(\\.diviner$)": "DivinerArticleAtomizer"
},
"exclude": [
"(^externals/)",
"(^resources/)",
"(^scripts/)",
"(^src/docs/contributor/)",
"(^src/docs/tech/)",
"(^src/docs/user/)",
"(^support/)",
"(^webroot/rsrc/externals/)"
],
"groups": {
- "overview": {
- "name": "Overview"
- },
"javascript": {
"name": "Javascript"
},
"lore": {
- "name": "Phabricator Lore"
+ "name": "Phorge Lore"
},
"php": {
"name": "PHP"
},
"review": {
"name": "Revision Control and Code Review"
},
"sundry": {
"name": "Sundries"
}
}
}
diff --git a/src/docs/book/phabricator.book b/src/docs/book/phorge.book
similarity index 97%
rename from src/docs/book/phabricator.book
rename to src/docs/book/phorge.book
index 2beef23023..d423482813 100644
--- a/src/docs/book/phabricator.book
+++ b/src/docs/book/phorge.book
@@ -1,343 +1,343 @@
{
- "name": "phabdev",
- "title": "Phabricator Technical Documentation",
- "short": "Phabricator Tech Docs",
- "preface": "Technical reference material for Phabricator developers.",
+ "name": "dev",
+ "title": "Phorge Technical Documentation",
+ "short": "Tech Docs",
+ "preface": "Technical reference material for Phorge developers.",
"root": "../../../",
"uri.source":
- "https://secure.phabricator.com/diffusion/P/browse/master/%f$%l",
+ "https://we.phorge.it/diffusion/P/browse/master/%f$%l",
"rules": {
"(\\.diviner$)": "DivinerArticleAtomizer",
"(\\.php$)": "DivinerPHPAtomizer"
},
"exclude": [
"(^externals/)",
"(^resources/)",
"(^scripts/)",
"(^src/docs/contributor/)",
"(^src/docs/flavor/)",
"(^src/docs/user/)",
"(^support/)",
"(^webroot/rsrc/externals/)"
],
"groups": {
"aphront": {
"name": "Aphront",
"include": "(^src/aphront/)"
},
"almanac": {
"name": "Almanac",
"include": "(^src/applications/almanac/)"
},
"aphlict": {
"name": "Aphlict",
"include": "(^src/applications/aphlict/)"
},
"arcanist": {
"name": "Arcanist Integration",
"include": "(^src/applications/arcanist/)"
},
"auth": {
"name": "Auth",
"include": "(^src/applications/auth/)"
},
"baseapp": {
"name": "Application Basics",
"include": "(^src/applications/base/)"
},
"cache": {
"name": "Cache",
"include": "(^src/applications/cache/)"
},
"calendar": {
"name": "Calendar",
"include": "(^src/applications/calendar/)"
},
"celerity": {
"name": "Celerity",
"include": "(^src/applications/celerity/)"
},
"chatlog": {
"name": "Chatlog",
"include": "(^src/applications/chatlog/)"
},
"conduit": {
"name": "Conduit",
"include": "(^src/applications/conduit/)"
},
"config": {
"name": "Config",
"include": "(^src/applications/config/)"
},
"conpherence": {
"name": "Conpherence",
"include": "(^src/applications/conpherence/)"
},
"console": {
"name": "Console",
"include": "(^src/applications/console/)"
},
"countdown": {
"name": "Countdown",
"include": "(^src/applications/countdown/)"
},
"customfield": {
"name": "Custom Fields",
"include": "(^src/infrastructure/customfield/)"
},
"daemon": {
"name": "Daemons",
"include": [
"(^src/applications/daemon/)",
"(^src/infrastructure/daemon/)"
]
},
"dashboard": {
"name": "Dashboard",
"include": "(^src/applications/dashboard/)"
},
"differential": {
"name": "Differential",
"include": "(^src/applications/differential/)"
},
"diffusion": {
"name": "Diffusion",
"include": "(^src/applications/diffusion/)"
},
"diviner": {
"name": "Diviner",
"include": "(^src/applications/diviner/)"
},
"doorkeeper": {
"name": "Doorkeeper",
"include": "(^src/applications/doorkeeper/)"
},
"draft": {
"name": "Draft",
"include": "(^src/applications/draft/)"
},
"drydock": {
"name": "Drydock",
"include": "(^src/applications/drydock/)"
},
"edges": {
"name": "Edges",
"include": "(^src/infrastructure/edges/)"
},
"events": {
"name": "Events",
"include": "(^src/infrastructure/events/)"
},
"fact": {
"name": "Fact",
"include": "(^src/applications/fact/)"
},
"feed": {
"name": "Feed",
"include": "(^src/applications/feed/)"
},
"files": {
"name": "Files",
"include": "(^src/applications/files/)"
},
"flag": {
"name": "Flags",
"include": "(^src/applications/flag/)"
},
"fund": {
"name": "Fund",
"include": "(^src/applications/fund/)"
},
"harbormaster": {
"name": "Harbormaster",
"include": "(^src/applications/harbormaster/)"
},
"help": {
"name": "Help",
"include": "(^src/applications/help/)"
},
"herald": {
"name": "Herald",
"include": "(^src/applications/herald/)"
},
"home": {
"name": "Home",
"include": "(^src/applications/home/)"
},
"legalpad": {
"name": "Legalpad",
"include": "(^src/applications/legalpad/)"
},
"lipsum": {
"name": "Lipsum",
"include": "(^src/applications/lipsum/)"
},
"macro": {
"name": "Macro",
"include": "(^src/applications/macro/)"
},
"maniphest": {
"name": "Maniphest",
"include": "(^src/applications/maniphest/)"
},
"meta": {
"name": "Applications",
"include": "(^src/applications/meta/)"
},
"metamta": {
"name": "MetaMTA",
"include": "(^src/applications/metamta/)"
},
"multimeter": {
"name": "Multimeter",
"include": "(^src/applications/multimeter/)"
},
"notification": {
"name": "Notifications",
"include": "(^src/applications/notification/)"
},
"nuance": {
"name": "Nuance",
"include": "(^src/applications/nuance/)"
},
"oauthserver": {
"name": "OAuth Server",
"include": "(^src/applications/oauthserver/)"
},
"owners": {
"name": "Owners",
"include": "(^src/applications/owners/)"
},
"passphrase": {
"name": "Passphrase",
"include": "(^src/applications/passphrase/)"
},
"paste": {
"name": "Paste",
"include": "(^src/applications/paste/)"
},
"people": {
"name": "People",
"include": "(^src/applications/people/)"
},
"phame": {
"name": "Phame",
"include": "(^src/applications/phame/)"
},
"phid": {
"name": "PHIDs",
"include": "(^src/applications/phid/)"
},
"phlux": {
"name": "Phlux",
"include": "(^src/applications/phlux/)"
},
"pholio": {
"name": "Pholio",
"include": "(^src/applications/pholio/)"
},
"phortune": {
"name": "Phortune",
"include": "(^src/applications/phortune/)"
},
"phpast": {
"name": "PHPAST",
"include": "(^src/applications/phpast/)"
},
"phragment": {
"name": "Phragment",
"include": "(^src/applications/phragment/)"
},
"phrequent": {
"name": "Phrequent",
"include": "(^src/applications/phrequent/)"
},
"phriction": {
"name": "Phriction",
"include": "(^src/applications/phriction/)"
},
"phui": {
"name": "PHUI",
"include": "(^src/view/phui/)"
},
"policy": {
"name": "Policy",
"include": "(^src/applications/policy/)"
},
"ponder": {
"name": "Ponder",
"include": "(^src/applications/ponder/)"
},
"project": {
"name": "Projects",
"include": "(^src/applications/project/)"
},
"releeph": {
"name": "Releeph",
"include": "(^src/applications/releeph/)"
},
"remarkup": {
"name": "Remarkup",
"include": [
"(^src/applications/remarkup/)",
"(^src/infrastructure/markup/)"
]
},
"repository": {
"name": "Repositories",
"include": "(^src/applications/repository/)"
},
"search": {
"name": "Search",
"include": "(^src/applications/search/)"
},
"settings": {
"name": "Settings",
"include": "(^src/applications/settings/)"
},
"slowvote": {
"name": "Slowvote",
"include": "(^src/applications/slowvote/)"
},
"spaces": {
"name": "Spaces",
"include": "(^src/applications/spaces/)"
},
"storage": {
"name": "Storage",
"include": "(^src/infrastructure/storage/)"
},
"subscriptions": {
"name": "Subscriptions",
"include": "(^src/applications/subscriptions/)"
},
"support": {
"name": "Support",
"include": "(^src/applications/support/)"
},
"system": {
"name": "System",
"include": "(^src/applications/system/)"
},
"tokens": {
"name": "Tokens",
"include": "(^src/applications/tokens/)"
},
"transactions": {
"name": "Transactions",
"include": "(^src/applications/transactions/)"
},
"typeahead": {
"name": "Typeahead",
"include": "(^src/applications/typeahead/)"
},
"uiexample": {
"name": "UI Examples",
"include": "(^src/applications/uiexample/)"
},
"xhprof": {
"name": "XHProf",
"include": "(^src/applications/xhprof/)"
}
}
}
diff --git a/src/docs/book/user.book b/src/docs/book/user.book
index fb2dccc578..449acd4ffc 100644
--- a/src/docs/book/user.book
+++ b/src/docs/book/user.book
@@ -1,45 +1,42 @@
{
- "name": "phabricator",
- "title": "Phabricator User Documentation",
- "short": "Phabricator User Docs",
- "preface": "Instructions for installing, configuring, and using Phabricator.",
+ "name": "phorge",
+ "title": "Phorge User Documentation",
+ "short": "User Docs",
+ "preface": "Instructions for installing, configuring, and using Phorge.",
"root": "../../../",
"uri.source":
- "https://secure.phabricator.com/diffusion/P/browse/master/%f$%l",
+ "https://we.phorge.it/diffusion/P/browse/master/%f$%l",
"rules": {
"(\\.diviner$)": "DivinerArticleAtomizer"
},
"exclude": [
"(^externals/)",
"(^resources/)",
"(^scripts/)",
"(^src/docs/contributor/)",
"(^src/docs/flavor/)",
"(^src/docs/tech/)",
"(^support/)",
"(^webroot/rsrc/externals/)"
],
"groups": {
"intro": {
"name": "Introduction"
},
"config": {
"name": "Configuration"
},
"userguide": {
"name": "Application User Guides"
},
"conduit": {
"name": "API Documentation"
},
"cluster": {
"name": "Cluster Configuration"
},
"fieldmanual": {
"name": "Field Manuals"
- },
- "cellar": {
- "name": "Musty Cellar"
}
}
}
diff --git a/src/docs/contributor/adding_new_classes.diviner b/src/docs/contributor/adding_new_classes.diviner
index ea932eba5c..64ab228ea3 100644
--- a/src/docs/contributor/adding_new_classes.diviner
+++ b/src/docs/contributor/adding_new_classes.diviner
@@ -1,256 +1,255 @@
@title Adding New Classes
@group developer
-Guide to adding new classes to extend Phabricator.
+Guide to adding new classes to extend Phorge.
Overview
========
-Phabricator is highly modular, and many parts of it can be extended by adding
+Phorge is highly modular, and many parts of it can be extended by adding
new classes. This document explains how to write new classes to change or
-expand the behavior of Phabricator.
+expand the behavior of Phorge.
-IMPORTANT: The upstream does not offer support with extension development.
+NOTE: The upstream offers limited support with extension development.
Fundamentals
============
-Phabricator primarily discovers functionality by looking at concrete subclasses
-of some base class. For example, Phabricator determines which applications are
+Phorge primarily discovers functionality by looking at concrete subclasses
+of some base class. For example, Phorge determines which applications are
available by looking at all of the subclasses of
@{class@phabricator:PhabricatorApplication}. It
discovers available workflows in `arc` by looking at all of the subclasses of
@{class@arcanist:ArcanistWorkflow}. It discovers available locales
by looking at all of the subclasses of @{class@arcanist:PhutilLocale}.
This pattern holds in many cases, so you can often add functionality by adding
-new classes with no other work. Phabricator will automatically discover and
+new classes with no other work. Phorge will automatically discover and
integrate the new capabilities or features at runtime.
There are two main ways to add classes:
- **Extensions Directory**: This is a simple way to add new code. It is
less powerful, but takes a lot less work. This is good for quick changes,
testing and development, or getting started on a larger project.
- **Creating Libraries**: This is a more advanced and powerful way to
organize extension code. This is better for larger or longer-lived
projects, or any code which you plan to distribute.
The next sections walk through these approaches in greater detail.
Extensions Directory
====================
-The easiest way to extend Phabricator by adding new classes is to drop them
-into the extensions directory, at `phabricator/src/extensions/`.
+The easiest way to extend Phorge by adding new classes is to drop them
+into the extensions directory, at `phorge/src/extensions/`.
This is intended as a quick way to add small pieces of functionality, test new
-features, or get started on a larger project. Extending Phabricator like this
+features, or get started on a larger project. Extending Phorge like this
imposes a small performance penalty compared to using a library.
-This directory exists in all libphutil libraries, so you can find a similar
+This directory also exists for Arcanist, so you can find a similar
directory in `arcanist/src/extensions/`.
For example, to add a new application, create a file like this one and add it
-to `phabricator/src/extensions/`.
+to `phorge/src/extensions/`.
-```name=phabricator/src/extensions/ExampleApplication.php, lang=php
+```name=phorge/src/extensions/ExampleApplication.php, lang=php
<?php
final class ExampleApplication extends PhabricatorApplication {
public function getName() {
return pht('Example');
}
}
```
If you load {nav Applications} in the web UI, you should now see your new
application in the list. It won't do anything yet since you haven't defined
-any interesting behavior, but this is the basic building block of Phabricator
+any interesting behavior, but this is the basic building block of Phorge
extensions.
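As a rough sketch of where to go from here (assuming the `getShortDescription()` and `getBaseURI()` hooks behave as they do on the stock `PhabricatorApplication` base class), you might flesh the example out like this:
```lang=php
<?php

final class ExampleApplication extends PhabricatorApplication {

  public function getName() {
    return pht('Example');
  }

  // Assumed base-class hook: short blurb shown in the application list.
  public function getShortDescription() {
    return pht('A minimal example application.');
  }

  // Assumed base-class hook: the URI prefix this application answers to.
  public function getBaseURI() {
    return '/example/';
  }

}
```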
Creating Libraries
==================
-A more powerful (but more complicated) way to extend Phabricator is to create
-a libphutil library. Libraries can organize a larger amount of code, are easier
+A more powerful (but more complicated) way to extend Phorge is to create
+a library. Libraries can organize a larger amount of code, are easier
to work with and distribute, and have slightly better performance than loose
source files in the extensions directory.
In general, you'll perform these one-time setup steps to create a library:
- Create a new directory.
- Use `arc liberate` to initialize and name the library.
- - Configure Phabricator or Arcanist to load the library.
+ - Configure Phorge or Arcanist to load the library.
Then, to add new code, you do this:
- Write or update classes.
- Update the library metadata by running `arc liberate` again.
Initializing a Library
======================
-To create a new libphutil library, create a directory for it and run
+To create a new library, create a directory for it and run
`arc liberate` on the directory. This documentation will use a conventional
directory layout, which is recommended, but you are free to deviate from this.
```
$ mkdir libcustom/
$ cd libcustom/
libcustom/ $ arc liberate src/
```
Now you'll get a prompt like this:
```lang=txt
No library currently exists at that path...
The directory '/some/path/libcustom/src' does not exist.
Do you want to create it? [y/N] y
Creating new libphutil library in '/some/path/libcustom/src'.
Choose a name for the new library.
What do you want to name this library?
```
Choose a library name (in this case, "libcustom" would be appropriate) and
you should get some details about the library initialization:
```lang=txt
Writing '__phutil_library_init__.php' to
'/some/path/libcustom/src/__phutil_library_init__.php'...
Using library root at 'src'...
Mapping library...
Verifying library...
Finalizing library map...
OKAY Library updated.
```
This will write three files:
- `src/.phutil_module_cache` This is a cache which makes "arc liberate"
faster when you run it to update the library. You can safely remove it at
any time. If you check your library into version control, you can add this
file to ignore rules (like `.gitignore`).
- `src/__phutil_library_init__.php` This records the name of the library and
tells libphutil that a library exists here (see the sketch after this list).
- `src/__phutil_library_map__.php` This is a map of all the symbols
(functions and classes) in the library, which allows them to be autoloaded
at runtime and dependencies to be statically managed by `arc liberate`.
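For reference, here is a minimal sketch of what `__phutil_library_init__.php` typically contains, assuming the library was named "libcustom" (check the generated file for the exact form in your version):
```lang=php
<?php

// Written by `arc liberate`: registers this directory as the root of the
// "libcustom" library so its classes can be located and autoloaded.
phutil_register_library('libcustom', __FILE__);
```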
-Linking with Phabricator
-========================
+Linking with Phorge
+===================
-If you aren't using this library with Phabricator (e.g., you are only using it
-with Arcanist or are building something else on libphutil) you can skip this
+(NOTE) If you aren't using this library with Phorge (e.g., you are only using it
+with Arcanist or are building something else) you can skip this
step.
-But, if you intend to use this library with Phabricator, you need to define its
-dependency on Phabricator by creating a `.arcconfig` file which points at
-Phabricator. For example, you might write this file to
+But, if you intend to use this library with Phorge, you need to define its
+dependency on Phorge by creating a `.arcconfig` file which points at
+Phorge. For example, you might write this file to
`libcustom/.arcconfig`:
```lang=json
{
"load": [
- "phabricator/src/"
+ "phorge/src/"
]
}
```
For details on creating a `.arcconfig`, see
@{article:Arcanist User Guide: Configuring a New Project}. In general, this
-tells `arc liberate` that it should look for symbols in Phabricator when
+tells `arc liberate` that it should look for symbols in Phorge when
performing static analysis.
-NOTE: If Phabricator isn't located next to your custom library, specify a
-path which actually points to the `phabricator/` directory.
+NOTE: If Phorge isn't located next to your custom library, specify a
+path which actually points to the `phorge/` directory.
You do not need to declare dependencies on `arcanist`, since `arc liberate`
automatically loads them.
-Finally, edit your Phabricator config to tell it to load your library at
+Finally, edit your Phorge config to tell it to load your library at
runtime, by adding it to `load-libraries`:
```lang=json
...
'load-libraries' => array(
'libcustom' => 'libcustom/src/',
),
...
```
-Now, Phabricator will be able to load classes from your custom library.
+Now, Phorge will be able to load classes from your custom library.
Writing Classes
===============
To actually write classes, create a new module and put code in it:
libcustom/ $ mkdir src/example/
libcustom/ $ nano src/example/ExampleClass.php # Edit some code.
Now, run `arc liberate` to regenerate the static resource map:
libcustom/ $ arc liberate src/
This will automatically regenerate the static map of the library.
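A class in the library is just a plain PHP file. You do not add `require` or `include` statements; the regenerated library map handles autoloading. A minimal sketch (the class name and behavior are purely illustrative):
```lang=php
<?php

// libcustom/src/example/ExampleClass.php
final class ExampleClass {

  public function getGreeting() {
    // Autoloading is handled by the library map, so this class can be
    // referenced from anywhere once `arc liberate` has run.
    return 'Hello from libcustom!';
  }

}
```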
What You Can Extend And Invoke
==============================
-Arcanist and Phabricator are strict about extensibility of classes and
+Arcanist and Phorge are strict about extensibility of classes and
visibility of methods and properties. Most classes are marked `final`, and
methods have the minimum required visibility (protected or private). The goal
of this strictness is to make it clear what you can safely extend, access, and
invoke, so your code will keep working as the upstream changes.
-IMPORTANT: We'll still break APIs frequently. The upstream does not support
-extension development, and none of these APIs are stable.
+IMPORTANT: We'll still break APIs frequently. The upstream offers limited
+support for extension development, and none of these APIs are stable.
-When developing libraries to work with Arcanist and Phabricator, you should
+When developing libraries to work with Arcanist and Phorge, you should
respect method and property visibility.
If you want to add features but can't figure out how to do it without changing
-Phabricator code, here are some approaches you may be able to take:
+Phorge code, here are some approaches you may be able to take:
- {icon check, color=green} **Use Composition**: If possible, use composition
rather than extension to build your feature.
- {icon check, color=green} **Find Another Approach**: Check the
documentation for a better way to accomplish what you're trying to do.
- {icon check, color=green} **File a Feature Request**: Let us know what your
use case is so we can make the class tree more flexible or configurable, or
point you at the right way to do whatever you're trying to do, or explain
- why we don't let you do it. Note that we **do not support** extension
- development so you may have mixed luck with this one.
+ why we don't let you do it.
These approaches are **discouraged**, but also possible:
- {icon times, color=red} **Fork**: Create an ad-hoc local fork and remove
`final` in your copy of the code. This will make it more difficult for you
to upgrade in the future, although it may be the only real way forward
depending on what you're trying to do.
- {icon times, color=red} **Use Reflection**: You can use
[[ http://php.net/manual/en/book.reflection.php | Reflection ]] to remove
modifiers at runtime. This is fragile and discouraged, but technically
possible.
- {icon times, color=red} **Remove Modifiers**: Send us a patch removing
`final` (or turning `protected` or `private` into `public`). We will almost
never accept these patches unless there's a very good reason that the
current behavior is wrong.
Next Steps
==========
Continue by:
- - visiting the [[ https://secure.phabricator.com/w/community_resources/ |
+ - visiting the [[ https://we.phorge.it/w/community_resources/ |
Community Resources ]] page to find or share extensions and libraries.
diff --git a/src/docs/contributor/adding_new_css_and_js.diviner b/src/docs/contributor/adding_new_css_and_js.diviner
index 00a3808fba..16bf5ebe03 100644
--- a/src/docs/contributor/adding_new_css_and_js.diviner
+++ b/src/docs/contributor/adding_new_css_and_js.diviner
@@ -1,96 +1,94 @@
@title Adding New CSS and JS
@group developer
-Explains how to add new CSS and JS files to Phabricator.
+Explains how to add new CSS and JS files to Phorge.
= Overview =
-Phabricator uses a system called **Celerity** to manage static resources. If you
-are a current or former Facebook employee, Celerity is based on the Haste system
-used at Facebook and generally behaves similarly.
+Phorge uses a system called **Celerity** to manage static resources.
-This document is intended for Phabricator developers and contributors. This
+(NOTE) This document is intended for Phorge developers and contributors. This
process will not work correctly for third-party code, plugins, or extensions.
= Adding a New File =
To add a new CSS or JS file, create it in an appropriate location in
-`webroot/rsrc/css/` or `webroot/rsrc/js/` inside your `phabricator/`
+`webroot/rsrc/css/` or `webroot/rsrc/js/` inside your `phorge/`
directory.
Each file must `@provides` itself as a component, declared in a header comment:
LANG=css
/**
* @provides duck-styles-css
*/
.duck-header {
font-size: 9001px;
}
Note that this comment must be a Javadoc-style comment, not just any comment.
If your component depends on other components (which is common in JS but
rare and inadvisable in CSS), declare them with `@requires`:
LANG=js
/**
* @requires javelin-stratcom
* @provides duck
*/
/**
* Put class documentation here, NOT in the header block.
*/
JX.install('Duck', {
...
});
Then rebuild the Celerity map (see the next section).
= Changing an Existing File =
When you add, move or remove a file, or change the contents of existing JS or
CSS file, you should rebuild the Celerity map:
- phabricator/ $ ./bin/celerity map
+ phorge/ $ ./bin/celerity map
If you've only changed the content of existing files, things will generally
work even if you don't rebuild the map, but they may stop working correctly in
the future if you skip this step.
The generated file `resources/celerity/map.php` causes merge conflicts
quite often. They can be resolved by running the Celerity mapper. You can
automate this process by running:
- phabricator/ $ ./scripts/celerity/install_merge.sh
+ phorge/ $ ./scripts/celerity/install_merge.sh
This will install a Git merge driver which will run when a conflict occurs in
this file.
= Including a File =
To include a CSS or JS file in a page, use
@{function:require_celerity_resource}:
require_celerity_resource('duck-styles-css');
require_celerity_resource('duck');
If your map is up to date, the resource should now be included correctly when
the page is rendered.
You should place this call as close to the code which actually uses the resource
as possible, i.e. **not** at the top of your Controller. The idea is that you
should @{function:require_celerity_resource} a resource only if you are actually
using it on a specific rendering of the page, not just because some views of the
page might require it.
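For example, a view that actually renders the duck header might require the resource from inside its `render()` method. This is a hypothetical sketch; only `require_celerity_resource()` and `phutil_tag()` are the documented calls:
```lang=php
final class DuckHeaderView extends AphrontView {

  public function render() {
    // Require the resource at the use site, not in the controller.
    require_celerity_resource('duck-styles-css');

    return phutil_tag(
      'div',
      array(
        'class' => 'duck-header',
      ),
      pht('Quack'));
  }

}
```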
= Next Steps =
Continue by:
- reading about Javascript-specific guidelines in @{article:Javascript Coding
Standards}; or
- reading about CSS-specific guidelines and features in @{article:CSS Coding
Standards}.
diff --git a/src/docs/contributor/assistive_technologies.diviner b/src/docs/contributor/assistive_technologies.diviner
index a519bde881..98113b2a07 100644
--- a/src/docs/contributor/assistive_technologies.diviner
+++ b/src/docs/contributor/assistive_technologies.diviner
@@ -1,76 +1,76 @@
@title Assistive Technologies
@group developer
-Information about making Phabricator accessible to assistive technologies.
+Information about making Phorge accessible to assistive technologies.
Overview
========
Assistive technologies help people with disabilities use the web. For example,
screen readers can assist people with limited or no eyesight by reading the
contents of pages aloud.
-Phabricator has some support for assistive technologies, and we'd like to have
+Phorge has some support for assistive technologies, and we'd like to have
more support. This document describes how to use the currently available
-features to improve the accessibility of Phabricator.
+features to improve the accessibility of Phorge.
Aural-Only Elements
===================
The most common issue assistive technologies encounter is buttons, links, or
other elements which only convey information visually (usually through an icon
or image).
These elements can be made more accessible by providing an aural-only label.
This label will not be displayed by visual browsers, but will be read by screen
readers.
To add an aural-only label to an element, use `javelin_tag()` with the
`aural` attribute:
javelin_tag(
'span',
array(
'aural' => true,
),
pht('Aural Label Here'));
This label should be placed inside the button or link that you are labeling.
You can also use `aural` on a container to provide an entirely different
replacement element, but should be cautious about doing this.
NOTE: You must use `javelin_tag()`, not `phutil_tag()`, to get support for
this attribute.
Visual-Only Elements
====================
Occasionally, a visual element should be hidden from screen readers. This should
be rare, but some textual elements convey very little information or are
otherwise disruptive for aural users.
This technique can also be used to offer a visual alternative of an element
and a different aural alternative element. However, this should be rare: it is
usually better to adapt a single element to work well for both visual and aural
users.
You can mark an element as visual-only by using `javelin_tag()` with the
`aural` attribute:
javelin_tag(
'span',
array(
'aural' => false,
),
$ascii_art);
Previewing Aural Pages
======================
To verify aural markup, you can add `?__aural__=1` to any page URI. This will
-make Phabricator render the page with styles that reveal aural-only elements and
+make Phorge render the page with styles that reveal aural-only elements and
mute visual-only elements.
diff --git a/src/docs/contributor/bug_reports.diviner b/src/docs/contributor/bug_reports.diviner
index e86fc917a5..3ded817a48 100644
--- a/src/docs/contributor/bug_reports.diviner
+++ b/src/docs/contributor/bug_reports.diviner
@@ -1,5 +1,156 @@
@title Contributing Bug Reports
@group detail
-Effective June 1, 2021: Phabricator is no longer actively maintained and no longer accepts bug reports.
+Describes how to file an effective Phorge bug report.
+
+Overview
+========
+
+This article describes how to file an effective Phorge bug report.
+
+The most important things to do are:
+
+ - check the list of common fixes below;
+ - make sure Phorge is up to date;
+ - make sure we support your setup;
+ - gather debugging information; and
+ - explain how to reproduce the issue.
+
+The rest of this article walks through these points in detail.
+
+For general information on contributing to Phorge, see
+@{article:Contributor Introduction}.
+
+
+Common Fixes
+============
+
+Before you file a report, here are some common solutions to problems:
+
+ - **Update Phorge**: We receive a lot of bug reports about issues we have
+ already fixed in HEAD. Updating often resolves issues. It is common for
+ issues to be fixed in less than 24 hours, so even if you've updated recently
+ you should update again. If you aren't sure how to update, see the next
+ section.
+ - **Update Libraries**: Make sure `arcanist/` and `phorge/` are both up
+ to date. Users often update `phorge/` but forget to update `arcanist/`.
+ When you update, make sure you update both libraries.
+ - **Restart Apache or PHP-FPM**: Phorge uses caches which don't get
+ reset until you restart Apache or PHP-FPM. After updating, make sure you
+ restart.
+
+
+Update Phorge
+=============
+
+Before filing a bug, make sure you are up to date. We receive many bug reports
+for issues we have already fixed, and even if we haven't fixed an issue we'll
+be able to resolve it more easily if you file a report based on HEAD. (For
+example, an old stack trace may not have the right line numbers, which will
+make it more difficult for us to figure out what's going wrong.)
+
+To update Phorge, use a script like the one described in
+@{article:Upgrading Phorge}.
+
+**If you can not update** for some reason, please include the version of
+Phorge and Arcanist you are running when you file a report.
+
+For help, see @{article:Providing Version Information}.
+
+
+Supported Issues
+================
+
+Before filing a bug, make sure you're filing an issue against something we
+support.
+
+**We can NOT help you with issues we can not reproduce.** It is critical that
+you explain how to reproduce the issue when filing a report.
+
+For help, see @{article:Providing Reproduction Steps}.
+
+**We do NOT support prototype applications.** If you're running into an issue
+with a prototype application, you're on your own. For more information about
+prototype applications, see @{article:User Guide: Prototype Applications}.
+
+**We do NOT support third-party packages or instructions.** If you installed
+Phorge (or configured some aspect of it) using a third-party package or by
+following a third-party guide (like a blog post), we can not help you.
+Phorge changes quickly and third-party information is unreliable and often
+falls out of date. Contact the maintainer of the package or guide you used,
+or reinstall following the upstream instructions.
+
+**We do NOT support custom code development or third-party libraries.** If
+you're writing an extension, you're on your own. We provide some documentation,
+but can not help you with extension or library development. If you downloaded a
+library from somewhere, contact the library maintainer.
+
+**We do NOT support bizarre environments.** If your issue is specific to an
+unusual installation environment, we generally will not help you find a
+workaround. Install Phorge in a normal environment instead. Examples of
+unusual environments are shared hosts, nontraditional hosts (gaming consoles,
+storage appliances), and hosts with unusually tight resource constraints. The
+vast majority of users run Phorge in normal environments (modern computers
+with root access) and these are the only environments we support.
+
+Otherwise, if you're having an issue with a supported first-party application
+and followed the upstream install instructions on a normal computer, we're happy
+to try to help.
+
+
+Getting More Information
+========================
+
+For some issues, there are places you can check for more information. This may
+help you resolve the issue yourself. Even if it doesn't, this information can
+help us figure out and resolve an issue.
+
+ - For issues with `arc` or any other command-line script, you can get more
+ details about what the script is doing by adding the `--trace` flag.
+ - For issues with Phorge, check your webserver error logs.
+ - For Apache, this is often `/var/log/httpd/error.log`, or
+ `/var/log/apache2/error.log` or similar.
+ - For nginx, check both the nginx and php-fpm logs.
+ - For issues with the UI, check the Javascript error console in your web
+ browser.
+ - Some other things, like daemons, have their own debug flags or
+ troubleshooting steps. Check the documentation for information on
+ troubleshooting. Adjusting settings or enabling debugging modes may give
+ you more information about the issue.
+
+
+Reproducibility
+===============
+
+The most important part of your report content is instructions on how to
+reproduce the issue. What did you do? If you do it again, does it still break?
+Does it depend on a specific browser? Can you reproduce the issue on
+a fresh, unmodified Phorge instance?
+
+It is nearly impossible for us to resolve many issues if we can not reproduce
+them. We will not accept reports which do not contain the information required
+to reproduce problems.
+
+For help, see @{article:Providing Reproduction Steps}.
+
+
+File a Bug Report
+=================
+
+If you're up to date, have collected information about the problem, and have
+the best reproduction instructions you can come up with, you're ready
+to file a report.
+
+It is **particularly critical** that you include reproduction steps.
+
+You can file a report [[ https://we.phorge.it/maniphest/task/edit/form/2/ | on this instance]].
+
+
+Next Steps
+==========
+
+Continue by:
+
+ - reading general support information in @{article:Support Resources}; or
+ - returning to the @{article:Contributor Introduction}.
diff --git a/src/docs/tech/celerity.diviner b/src/docs/contributor/celerity.diviner
similarity index 99%
rename from src/docs/tech/celerity.diviner
rename to src/docs/contributor/celerity.diviner
index 8501b1b536..a5706198d3 100644
--- a/src/docs/tech/celerity.diviner
+++ b/src/docs/contributor/celerity.diviner
@@ -1,62 +1,62 @@
@title Celerity Technical Documentation
-@group celerity
+@group developer
Technical overview of the Celerity system.
= Overview =
Celerity is a static resource (CSS and JS) management system, which handles:
- Keeping track of which resources a page needs.
- Generating URIs for the browser to access resources.
- Managing dependencies between resources.
- Packaging resources into fewer HTTP requests for performance.
- Preprocessing resources (e.g., stripping comments and whitespace).
- Delivering resources and managing resource cache lifetimes.
- Interfacing with the client to manage resources.
Celerity is an outgrowth of the //Haste// system at Facebook. You can find more
information about Celerity here:
- @{article:Things You Should Do Soon: Static Resources} describes the history
and context of the system and the problems it solves.
- @{article:Adding New CSS and JS} provides a developer guide to using
Celerity.
= Class Relationships =
Celerity's primary API is @{function:require_celerity_resource}, which marks a
resource for inclusion when a response is rendered (e.g., when the HTML page is
generated, or when the response to an Ajax request is built). For instance, if
you use a CSS class like "widget-view", you must ensure the appropriate CSS is
included by calling `require_celerity_resource('widget-view-css')` (or
similar), at your use site.
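A compact sketch of that pattern (the widget markup is hypothetical; `require_celerity_resource()` and `phutil_tag()` are the real calls):
```lang=php
// At the use site: require the CSS right where the class is used.
require_celerity_resource('widget-view-css');

$widget = phutil_tag(
  'div',
  array(
    'class' => 'widget-view',
  ),
  pht('Widget content'));
```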
This function uses @{class:CelerityAPI} to access the active
@{class:CelerityStaticResourceResponse} and tells it that it needs to include
the resource later, when the response actually gets built. (This layer of
indirection provides future-proofing against certain complex situations Facebook
eventually encountered).
When the time comes to render the response, the page renderer uses
@{class:CelerityAPI} to access the active
@{class:CelerityStaticResourceResponse} and requests that it render out
appropriate references to CSS and JS resources. It uses
@{class:CelerityResourceMap} to determine the dependencies for the requested
resources (so you only have to explicitly include what you're actually using,
and not all of its dependencies) and any packaging rules (so it may be able to
generate fewer resource requests, improving performance). It then generates
`<script />` and `<link />` references to these resources.
These references point at `/res/` URIs, which are handled by
@{class:CelerityResourceController}. It responds to these requests and delivers
the relevant resources and packages, managing cache lifetimes and handling any
necessary preprocessing. It uses @{class:CelerityResourceMap} to locate resources
and read packaging rules.
The dependency and packaging maps are generated by `bin/celerity map`,
which updates `resources/celerity/map.php`.
@{class:CelerityStaticResourceResponse} also manages some Javelin information,
and @{function:celerity_generate_unique_node_id} uses this metadata to provide
a better uniqueness guarantee when generating unique node IDs.
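As a usage sketch (hypothetical rendering code; `celerity_generate_unique_node_id()` and `javelin_tag()` are the real API), a unique node ID is generated server-side and then attached to markup so client-side behaviors can find the node later:
```lang=php
// Generate an ID that is unique within this response, then attach it to a
// node so Javascript behaviors can reference the node.
$content_id = celerity_generate_unique_node_id();

$panel = javelin_tag(
  'div',
  array(
    'id' => $content_id,
  ),
  pht('Loading...'));
```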
diff --git a/src/docs/contributor/cla.diviner b/src/docs/contributor/cla.diviner
deleted file mode 100644
index 54e0142beb..0000000000
--- a/src/docs/contributor/cla.diviner
+++ /dev/null
@@ -1,169 +0,0 @@
-@title Understanding the Phacility CLA
-@group detail
-
-Describes the Contributor License Agreement (CLA).
-
-Overview
-========
-
-IMPORTANT: This document is not legal advice.
-
-Phacility requires contributors to sign a Contributor License Agreement
-(often abbreviated "CLA") before we can accept contributions into the upstream.
-This document explains what this document means and why we require it.
-
-This requirement is not unusual, and many large open source projects require a
-similar CLA, including Python, Go, jQuery, and Apache Software Foundation
-projects.
-
-You can read more about CLAs and find more examples of companies and projects
-which require them on Wikipedia's
-[[ https://en.wikipedia.org/wiki/Contributor_License_Agreement | CLA ]] page.
-
-Our CLA is substantially similar to the CLA required by Apache, the
-"Apache Individual Contributor License Agreement V2.0". Many projects which
-require a CLA use this CLA or a similar one.
-
-
-Why We Require a CLA
-====================
-
-While many projects require a CLA, others do not. This project requires a CLA
-primarily because:
-
- - it gives us certain rights, particularly the ability to relicense the work
- later;
- - it makes the terms of your contribution clear, protecting us from liability
- related to copyright and patent disputes.
-
-**More Rights**: We consider the cost of maintaining changes to greatly
-outweigh the cost of writing them in the first place. When we accept work
-into the upstream, we are agreeing to bear that maintenance cost.
-
-This cost is not worthwhile to us unless the changes come with no strings
-attached. Among other concerns, we would be unable to redistribute Phabricator
-under a different license in the future without the additional rights the CLA
-gives us.
-
-For a concrete example of the problems this causes, Bootstrap switched from
-GPLv2 to MIT in 2012-2013. You can see the issue tracking the process and read
-about what they had to go through to do this here:
-
-https://github.com/twbs/bootstrap/issues/2054
-
-This took almost 18 months and required a huge amount of effort. We are not
-willing to encumber the project with that kind of potential cost in order to
-accept contributions.
-
-The rights you give us by signing the CLA allow us to release the software
-under a different license later without asking you for permission, including a
-license you may not agree with.
-
-They do not allow us to //undo// the existing release under the Apache license,
-but allow us to make an //additional// release under a different license, or
-release under multiple licenses (if we do, users may choose which license or
-licenses they wish to use the software under). It would also allow us to
-discontinue updating the release under the Apache license.
-
-While we do not currently plan to relicense Phabricator, we do not want to
-give up the ability to do so: we may want or need to in the future.
-
-The most likely scenario which would lead to us changing the license is if a
-new version of the Apache license is released. Open source software licenses
-are still largely untested in the US legal system, and they may face challenges
-in the future which could require adapting them to a changing legal
-environment. If this occurs, we would want to be able to update to a newer
-version of the license which accounted for these changes.
-
-It is also possible that we may want to change open source licenses (for
-example, to MIT) or adopt dual-licensing (for example, both Apache and MIT). We
-might want to do this so that our license is compatible with the licenses used
-by other software we want to be distributed alongside.
-
-Although we currently believe it is unlikely, it is also possible we may want
-to relicense Phabricator under a closed, proprietary, or literally evil license.
-By signing the CLA, you are giving us the power to do this without requiring
-you to consent. If you are not comfortable with this, do not sign the CLA and
-do not contribute to Phabricator.
-
-**Limitation of Liability**: The second benefit the CLA provides is that it
-makes the terms of your contribution explicitly clear upfront, and it puts us
-in a much stronger legal position if a contributor later claims there is
-ambiguity about ownership of their work. We can point at the document they
-signed as proof that they consented to our use and understood the terms of
-their contribution.
-
-//SCO v. IBM// was a lawsuit filed in 2003 alleging (roughly) that IBM had
-improperly contributed code owned by SCO to Linux. The details of this and the
-subsequent cases are very complex and the situation is not a direct parallel to
-anything we are likely to face, but SCO claimed billions of dollars in damages
-and the litigation has now been ongoing for more than a decade.
-
-We want to avoid situations like this in the future by making the terms of
-contribution explicit upfront.
-
-Generally, we believe the terms of the CLA are fair and reasonable for
-contributors, and that the primary way contributors benefit from contributing
-to Phabricator is that we publish and maintain their changes so they do not
-have to fork the software.
-
-If you have strong ideological reasons for contributing to open source, you may
-not be comfortable with the terms of the CLA (for example, it may be important
-to you that your changes are never available under a license which you haven't
-explicitly approved). This is fine and we can understand why contributors may
-hold this viewpoint, but we can not accept your changes into the upstream.
-
-
-Corporate vs Individual CLAs
-============================
-
-We offer two CLAs:
-
- - {L28}
- - {L30}
-
-These are both substantially similar to the corresponding Apache CLAs.
-
-If you own the work you are contributing, sign the individual CLA. If your
-employer owns the work you are contributing, have them sign the corporate CLA.
-
-**If you are employed, there is a substantial possibility that your employer
-owns your work.** If they do, you do not have the right to contribute it to us
-or assign the rights that we require, and can not contribute under the
-individual CLA. Work with your employer to contribute under the corporate CLA
-instead.
-
-Particularly, this clause in the individual CLA is the important one:
-
-> 4. You represent that you are legally entitled to grant the above license. If
-> your employer(s) has rights to intellectual property that you create that
-> includes your Contributions, you represent that you have received permission
-> to make Contributions on behalf of that employer, that your employer has
-> waived such rights for your Contributions to Phacility, or that your employer
-> has executed a separate Corporate CLA with Phacility.
-
-Ownership of your work varies based on where you live, how you are employed,
-and your agreements with your employer. However, at least in the US, it is
-likely that your employer owns your work unless you have anticipated conflicts
-and specifically avoided them. This generally makes sense: if you are paid by
-your employer for your work, they own the product of your work and you receive
-salary and benefits in fair exchange for that work.
-
-Your employer may have an ownership claim on your work even if you perform it
-on your own time, if you use their equipment (like a company laptop or phone),
-resources, facilities, or trade secrets, or signed something like an "Invention
-Assignment Agreement" when you were hired. Such agreements are common. The
-details of the strength of their claim will vary based on your situation and
-local law.
-
-If you are unsure, you should speak with your employer or a lawyer. If you
-contribute code you do not own under the individual CLA, you are exposing
-yourself to liability. You may also be exposing us to liability, but we'll have
-the CLA on our side to show that we were unwilling pawns in your malicious
-scheme to defraud your employer.
-
-The good news is that most employers are happy to contribute to open source
-projects. Incentives are generally well aligned: they get features they want,
-and it reflects well on them. In the past, potential contributors who have
-approached their employers about a corporate CLA have generally had little
-difficulty getting approval.
diff --git a/src/docs/contributor/contrib_intro.diviner b/src/docs/contributor/contrib_intro.diviner
index 59ad9b44df..00a2e42c0e 100644
--- a/src/docs/contributor/contrib_intro.diviner
+++ b/src/docs/contributor/contrib_intro.diviner
@@ -1,54 +1,41 @@
@title Contributor Introduction
@group contrib
-Introduction to contributing to Phabricator and Arcanist.
+Introduction to contributing to Phorge and Arcanist.
Overview
========
-If you'd like to contribute to Phabricator, this document can guide you though
+If you'd like to contribute to Phorge, this document can guide you through
ways you can help improve the project.
Writing code is valuable, but often isn't the best or easiest way to contribute.
In most cases we are pretty good at fixing easy stuff quickly, so we don't have
a big pile of easy stuff sitting around waiting for new contributors.
This can make it difficult to contribute code if you only have a little bit of
time to spend since most of the work that needs to be done usually requires some
heavy lifting.
Without writing any code, learning the whole codebase, making a big time
commitment, or having to touch PHP, here are some ways you can materially
-contribute to Phabricator:
+contribute to Phorge:
- - Drop by the [[ https://phurl.io/u/discourse | community forum ]] just to
- say "thanks". A big part of the reason we build this software is to help
- people solve problems, and knowing that our efforts are appreciated is
- really rewarding.
- - Recommend Phabricator to people who you think might find it useful. Our
+ - Recommend Phorge to people who you think might find it useful. Our
most powerful growth channel is word of mouth, and mentioning or tweeting
- about Phabricator helps the project grow. If writing a tweet sounds like
- too much work, you can use one of these form tweets written by our PR
- department to quickly and easily shill on our behalf. Hail corporate!
-
-> Phabricator seems like it's pretty okay
-
-> I am not being paid to mention Phabricator in this extemporaneous, completely organic tweet
-
-> Phabricator is objectively the best thing. Source: I am a certified, internationally recognized expert.
-
+ about Phorge helps the project grow.
- Submit high-quality bug reports by carefully following the guide in
@{article:Contributing Bug Reports}.
If all of this sounds nice but you really just want to write some code, be
aware that this project often presents a high barrier to entry for new
contributors. To continue, see @{article:Contributing Code}.
Next Steps
==========
Continue by:
- learning about bug reports in @{article:Contributing Bug Reports};
- learning about code contributions in @{article:Contributing Code}.
diff --git a/src/docs/contributor/contributing_code.diviner b/src/docs/contributor/contributing_code.diviner
index b6816e03b5..c7bdec25c9 100644
--- a/src/docs/contributor/contributing_code.diviner
+++ b/src/docs/contributor/contributing_code.diviner
@@ -1,4 +1,197 @@
@title Contributing Code
@group detail
-Effective June 1, 2021: Phabricator is no longer actively maintained, and no longer accepting contributions.
+Phorge is an open-source project, and welcomes contributions from the community
+at large. However, there are some guidelines we ask you to follow.
+
+
+Overview
+========
+
+The most important parts of contributing code to Phorge are:
+
+ - File a task with a bug report or feature request //before// you write code.
+ - We do not accept GitHub pull requests.
+ - Some alternative approaches are available if your change isn't something
+ we want to bring upstream.
+
+The rest of this article describes these points in more detail, and then
+provides guidance on writing and submitting patches.
+
+If you just want to contribute some code but don't have a specific bug or
+feature in mind, see the bottom of this document for tips on finding ways to get
+started.
+
+For general information on contributing to Phorge, see
+@{article:Contributor Introduction}.
+
+
+Coordinate First
+================
+
+Before sending code, you should file a task describing what you'd like to write.
+
+When you file a task, mention that you'd like to write the code to fix it. We
+can help contextualize your request or bug and guide you through writing an
+upstreamable patch, provided it's something that's upstreamable. If it isn't
+upstreamable, we can let you know what the issues are and help find another
+plan of attack.
+
+You don't have to file first (for example, if you spot a misspelling it's
+normally fine to just send a diff), but for anything even moderately complex
+you're strongly encouraged to file first and coordinate with the upstream.
+
+
+Rejecting Patches
+=================
+
+If you send us a patch without coordinating it with us first, it will probably
+be immediately rejected, or sit in limbo for a long time and eventually be
+rejected. The reasons we do this vary from patch to patch, but some of the most
+common reasons are:
+
+**Unjustifiable Costs**: We support code in the upstream forever. Support is
+enormously expensive and takes up a huge amount of our time. The cost to support
+a change over its lifetime is often 10x or 100x or 1000x greater than the cost
+to write the first version of it. Many uncoordinated patches we receive are
+"white elephants", which would cost much more to maintain than the value they
+provide.
+
+As an author, it may look like you're giving us free work and we're rejecting it
+as too expensive, but this viewpoint doesn't align with the reality of a large
+project which is actively supported by a small, experienced team. Writing code
+is cheap; maintaining it is expensive.
+
+By coordinating with us first, you can make sure the patch is something we
+consider valuable enough to put long-term support resources behind, and that
+you're building it in a way that we're comfortable taking over.
+
+**Not a Good Fit**: Many patches aren't good fits for the upstream: they
+implement features we simply don't want. Coordinating with us first helps
+make sure we're on the same page and interested in a feature.
+
+The most common type of patch along these lines is a patch which adds new
+configuration options. We consider additional configuration options to have
+an exceptionally high lifetime support cost and are very unlikely to accept
+them. Coordinate with us first.
+
+**Not a Priority**: If you send us a patch against something which isn't a
+priority, we probably won't have time to look at it. We don't give special
+treatment to low-priority issues just because there's code written: we'd still
+be spending time on something lower-priority when we could be spending it on
+something higher-priority instead.
+
+If you coordinate with us first, you can make sure your patch is in an area
+of the codebase that we can prioritize.
+
+**Overly Ambitious Patches**: Sometimes we'll get huge patches from new
+contributors. These can have a lot of fundamental problems and require a huge
+amount of our time to review and correct. If you're interested in contributing,
+you'll have more success if you start small and learn as you go.
+
+We can help you break a large change into smaller pieces and learn how the
+codebase works as you proceed through the implementation, but only if you
+coordinate with us first.
+
+**Generality**: We often receive several feature requests which ask for similar
+features, and can come up with a general approach which covers all of the use
+cases. If you send us a patch for //your use case only//, the approach may be
+too specific. When a cleaner and more general approach is available, we usually
+prefer to pursue it.
+
+By coordinating with us first, we can make you aware of similar use cases and
+opportunities to generalize an approach. These changes are often small, but can
+have a big impact on how useful a piece of code is.
+
+**Infrastructure and Sequencing**: Sometimes patches are written against a piece
+of infrastructure with major planned changes. We don't want to accept these
+because they'll make the infrastructure changes more difficult to implement.
+
+Coordinate with us first to make sure a change doesn't need to wait on other
+pieces of infrastructure. We can help you identify technical blockers and
+possibly guide you through resolving them if you're interested.
+
+
+No Prototype Changes
+====================
+
+With rare exceptions, we do not accept patches for prototype applications for
+the same reasons that we don't accept feature requests or bug reports. To learn
+more about prototype applications, see
+@{article:User Guide: Prototype Applications}.
+
+
+No Pull Requests
+================
+
+We do not accept pull requests on GitHub:
+
+ - Pull requests do not get lint and unit tests run, so issues which are
+ normally caught statically can slip by.
+ - Phorge is code review software, and is developed using its own workflows.
+ Pull requests bypass some of these workflows (for example, they will not
+ trigger Herald rules to notify interested parties).
+ - GitHub is not the authoritative master repository and we maintain a linear
+ history, so merging pull requests is cumbersome on our end.
+ - If you're comfortable enough with Phorge to contribute to it, you
+ should also be comfortable using it to submit changes.
+
+Instead of sending a pull request, use `arc diff` to create a revision on the
+upstream install. Your change will go through the normal Phorge review
+process.
+
+(GitHub does not allow repositories to disable pull requests, which is why
+it's technically possible to submit them.)
+
+
+Alternatives
+============
+
+If you've written code but we're not accepting it into the upstream, some
+alternative approaches include:
+
+**Maintain a local fork.** This will require some ongoing effort to port your
+changes forward when you update, but is often very reasonable for simple
+changes.
+
+**Develop as an application.** Many parts of Phorge's infrastructure are
+modular, and modularity is increasing over time. A lot of changes can be built
+as external modules or applications without forking Phorge itself. There
+isn't much documentation for this right now, but you can look at
+how other applications are implemented, and at other third-party code that
+extends Phorge.
+
+**Rise to prominence.** We're more willing to accept borderline changes from
+community members who are active, make multiple contributions, or have a history
+with the project. This is not carte blanche, but distinguishing yourself can
+make us feel more comfortable about supporting a change which is slightly
+outside of our comfort zone.
+
+
+Writing and Submitting Patches
+==============================
+
+To actually submit a patch, run `arc diff` in `phorge/` or `arcanist/`.
+When executed in these directories, `arc` should automatically talk to the
+upstream install. You can add #blessed_reviewers as a reviewer.
+
+You should read the relevant coding convention documents before you submit a
+change. If you're a new contributor, you don't need to worry about this too
+much. Just try to make your code look similar to the code around it, and we
+can help you through the details during review.
+
+ - @{article:General Coding Standards} (for all languages)
+ - @{article:PHP Coding Standards} (for PHP)
+ - @{article:Javascript Coding Standards} (for Javascript)
+
+In general, if you're coordinating with us first, we can usually provide
+guidance on how to implement things. The other articles in this section also
+provide information on how to work in the Phorge codebase.
+
+
+Next Steps
+==========
+
+Continue by:
+
+ - returning to the @{article:Contributor Introduction}.
diff --git a/src/docs/contributor/css_coding_standards.diviner b/src/docs/contributor/css_coding_standards.diviner
index c321124eae..e83778f24c 100644
--- a/src/docs/contributor/css_coding_standards.diviner
+++ b/src/docs/contributor/css_coding_standards.diviner
@@ -1,91 +1,91 @@
@title CSS Coding Standards
@group standards
-This document describes CSS features and coding standards for Phabricator.
+This document describes CSS features and coding standards for Phorge.
= Overview =
This document describes technical and style guidelines for writing CSS in
-Phabricator.
+Phorge.
-Phabricator has a limited CSS preprocessor. This document describes the features
+Phorge has a limited CSS preprocessor. This document describes the features
it makes available.
= Z-Indexes =
You should put all `z-index` rules in `z-index.css`, and keep them sorted. The
goal is to make indexes relatively manageable and reduce the escalation of the
Great Z-Index War where all indexes grow without bound in an endless arms race.
= Color Variables =
-Phabricator's preprocessor provides some standard color variables. You can
+Phorge's preprocessor provides some standard color variables. You can
reference these with `{$color}`. For example:
lang=css
span.critical {
color: {$red};
}
You can find a list of all available colors in the **UIExamples** application.
= Printable Rules =
If you preface a rule with `!print`, it will be transformed into a print rule
and activated when the user is printing the page or viewing a printable version
of the page:
lang=css
!print div.menu {
display: none;
}
Specifically, this directive causes two copies of the rule to be written out.
The output will look something like this:
lang=css
.printable div.menu {
display: none;
}
@media print {
div.menu {
display: none;
}
}
The former will activate when users look at the printable versions of pages, by
adding `__print__` to the URI. The latter will be activated in print contexts
by the media query.
= Device Rules =
-Phabricator's environment defines several device classes which can be used to
+Phorge's environment defines several device classes which can be used to
adjust behavior responsively. In particular:
lang=css
.device-phone {
/* Smallest breakpoint, usually for phones. */
}
.device-tablet {
/* Middle breakpoint, usually for tablets. */
}
.device-desktop {
/* Largest breakpoint, usually for desktops. */
}
Since many rules are specific to handheld devices, the `.device` class selects
either tablets or phones:
lang=css
.device {
/* Phone or tablet (not desktop). */
}
= Image Inlining =
-Phabricator's CSS preprocessor automatically inlines images which are less than
+Phorge's CSS preprocessor automatically inlines images which are less than
32KB using `data:` URIs. This is primarily useful for gradients or textures
which are small and difficult to sprite.
diff --git a/src/docs/contributor/database.diviner b/src/docs/contributor/database.diviner
index aaea485dc6..fc39c1ff1c 100644
--- a/src/docs/contributor/database.diviner
+++ b/src/docs/contributor/database.diviner
@@ -1,211 +1,213 @@
@title Database Schema
@group developer
This document describes key components of the database schema and should answer
questions like how to store new types of data.
Database System
===============
-Phabricator uses MySQL or another MySQL-compatible database (like MariaDB
+Phorge uses MySQL or another MySQL-compatible database (like MariaDB
or Amazon RDS).
-Phabricator uses the InnoDB table engine. The only exception is the
+Phorge uses the InnoDB table engine. The only exception is the
`search_documentfield` table which uses MyISAM because MySQL doesn't support
fulltext search in InnoDB (recent versions do, but we haven't added support
yet).
We are unlikely to ever support other incompatible databases like PostgreSQL or
SQLite.
PHP Drivers
===========
-Phabricator supports [[ http://www.php.net/book.mysql | MySQL ]] and
+Phorge supports [[ http://www.php.net/book.mysql | MySQL ]] and
[[ http://www.php.net/book.mysqli | MySQLi ]] PHP extensions.
Databases
=========
-Each Phabricator application has its own database. The names are prefixed by
-`phabricator_` (this is configurable).
+Each Phorge application has its own database. The names are prefixed by
+`phorge_` (this is configurable).
-Phabricator uses a separate database for each application. To understand why,
-see @{article:Why does Phabricator need so many databases?}.
+Phorge uses a separate database for each application. To understand why,
+see @{article:Why does Phorge need so many databases?}.
Connections
===========
-Phabricator specifies if it will use any opened connection just for reading or
+Phorge specifies if it will use any opened connection just for reading or
also for writing. This allows opening write connections to a primary and read
connections to a replica in primary/replica setups (which are not actually
supported yet).
Tables
======
Most table names are prefixed by their application names. For example,
-Differential revisions are stored in database `phabricator_differential` and
+Differential revisions are stored in database `phorge_differential` and
table `differential_revision`. This generally makes queries easier to recognize
and understand.
The exception is a few tables which share the same schema over different
databases such as `edge`.
We use lower-case table names with words separated by underscores.
Column Names
============
-Phabricator uses `camelCase` names for columns. The main advantage is that they
+Phorge uses `camelCase` names for columns. The main advantage is that they
directly map to properties in PHP classes.
Don't use MySQL reserved words (such as `order`) for column names.
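As a rough, hypothetical sketch (a real Lisk object needs more boilerplate than
this), camelCase columns correspond directly to the protected properties of a
storage object:

```lang=php
// Hypothetical sketch: each camelCase column maps onto a class property.
final class ExampleObject extends PhabricatorLiskDAO {

  protected $objectName;   // Column `objectName`.
  protected $authorPHID;   // Column `authorPHID`.

}
```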
Data Types
==========
-Phabricator defines a set of abstract data types (like `uint32`, `epoch`, and
+Phorge defines a set of abstract data types (like `uint32`, `epoch`, and
`phid`) which map to MySQL column types. The mapping depends on the MySQL
version.
-Phabricator uses `utf8mb4` character sets where available (MySQL 5.5 or newer),
+Phorge uses `utf8mb4` character sets where available (MySQL 5.5 or newer),
and `binary` character sets in most other cases. The primary motivation is to
allow 4-byte unicode characters to be stored (the `utf8` character set, which
is more widely available, does not support them). On newer MySQL, we use
`utf8mb4` to take advantage of improved collation rules.
-Phabricator stores dates with an `epoch` abstract data type, which maps to
+Phorge stores dates with an `epoch` abstract data type, which maps to
`int unsigned`. Although this makes dates less readable when browsing the
database, it makes date and time manipulation more consistent and
straightforward in the application.
We don't use the `enum` data type because each change to the list of possible
values requires altering the table (which is slow with big tables). We use
numbers (or short strings in some cases) mapped to PHP constants instead.
JSON and Other Serialized Data
==============================
Some data don't require structured access -- we don't need to filter or order by
them. We store these data as text fields in JSON format. This approach has
several advantages:
- If we decide to add another unstructured field then we don't need to alter
the table (which is slow for big tables in MySQL).
- Table structure is not cluttered by fields which could be unused most of the
time.
An example of such usage can be found in column
`differential_diffproperty.data`.
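As a hypothetical sketch (the `$property` object here is made up), one way to
write and read such a field is with the `phutil_json_encode()` and
`phutil_json_decode()` helpers:

```lang=php
// Hypothetical sketch: pack unstructured properties into a JSON text column.
$data = array(
  'lines.added'   => 32,
  'lines.removed' => 7,
);
$property->setData(phutil_json_encode($data));
$property->save();

// Later, read it back without any schema change.
$data = phutil_json_decode($property->getData());
```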
Primary Keys
============
Most tables have an auto-increment column named `id`. Adding an ID column is
appropriate for most tables (even tables that have another natural unique key),
as it improves consistency and makes it easier to perform generic operations
on objects.
For example, @{class:LiskMigrationIterator} allows you to very easily apply a
migration to a table using a constant amount of memory provided the table has
an `id` column.
Indexes
=======
Create all indexes necessary for fast query execution in most cases. Don't
create indexes which are not used. You can analyze queries with the tools
described in @{article:Using DarkConsole}.
Older MySQL versions are not able to use indexes for tuple search:
`(a, b) IN ((%s, %d), (%s, %d))`. Use `AND` and `OR` instead:
`((a = %s AND b = %d) OR (a = %s AND b = %d))`.
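For example, a query written in the `AND`/`OR` form might look like this (a
sketch only: the table and columns are hypothetical):

```lang=php
// Hypothetical sketch: match two (name, dateCreated) pairs without tuple-IN
// so that older MySQL can still use an index on (name, dateCreated).
$rows = queryfx_all(
  $conn,
  'SELECT * FROM example_table
    WHERE (name = %s AND dateCreated = %d)
       OR (name = %s AND dateCreated = %d)',
  'alpha', 1450000000,
  'beta', 1460000000);
```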
Foreign Keys
============
We don't use foreign keys because they're complicated and we haven't experienced
significant issues with data inconsistency that foreign keys could help prevent.
Empirically, we have witnessed firsthand how `ON DELETE CASCADE` relationships
can accidentally destroy huge amounts of data. We may pursue foreign keys
eventually, but there isn't a strong case for them at the present time.
PHIDs
=====
-Each globally referencable object in Phabricator has an associated PHID
-("Phabricator ID") which serves as a global identifier, similar to a GUID.
+Each globally referenceable object in Phorge has an associated PHID
+("Phorge ID") which serves as a global identifier, similar to a GUID.
We use PHIDs for referencing data in different databases.
We use both auto-incrementing IDs and global PHIDs because each is useful in
different contexts. Auto-incrementing IDs are meaningfully ordered and allow
us to construct short, human-readable object names (like `D2258`) and URIs.
Global PHIDs allow us to represent relationships between different types of
objects in a homogeneous way.
For example, infrastructure like "subscribers" can be implemented easily with
PHID relationships: different types of objects (users, projects, mailing lists)
are permitted to subscribe to different types of objects (revisions, tasks,
etc). Without PHIDs, we would need to add a "type" column to avoid ID collision;
using PHIDs makes implementing features like this simpler.
+For more information, see @{article:Handles Technical Documentation}.
+
Transactions
============
Transactional code should be written using transactions. Examples of such code
include inserting multiple records where one doesn't make sense without the
other, or selecting data that will later be used for an update. See the
relevant chapter in @{class:LiskDAO}.
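A minimal sketch (with hypothetical objects) of how the @{class:LiskDAO}
transaction helpers are used:

```lang=php
// Hypothetical sketch: insert two records which only make sense together.
$revision->openTransaction();
  $revision->save();

  $diff->setRevisionID($revision->getID());
  $diff->save();
$revision->saveTransaction();
```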
Advanced Features
=================
We don't use MySQL advanced features such as triggers, stored procedures or
events because we like expressing the application logic in PHP more than in SQL.
Some of these features (especially triggers) can also cause a great deal of
confusion, and are generally more difficult to debug, profile, version control,
update, and understand than application code.
Schema Denormalization
======================
-Phabricator uses schema denormalization sparingly. Avoid denormalization unless
+Phorge uses schema denormalization sparingly. Avoid denormalization unless
there is a compelling reason (usually, performance) to denormalize.
Schema Changes and Migrations
=============================
To create a new schema change or migration:
**Create a database patch**. Database patches go in
`resources/sql/autopatches/`. To change a schema, use a `.sql` file and write
in SQL. To perform a migration, use a `.php` file and write in PHP. Name your
file `YYYYMMDD.patchname.ext`. For example, `20141225.christmas.sql`.
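As a rough sketch, a PHP migration patch might look like this (the
`ExampleObject` class and the backfilled field are hypothetical):

```lang=php
<?php

// Hypothetical patch `20141226.backfill.php`: backfill a missing value on
// every row, loading one object at a time to keep memory use constant.
foreach (new LiskMigrationIterator(new ExampleObject()) as $object) {
  if ($object->getViewPolicy() !== null) {
    continue;
  }
  $object->setViewPolicy(PhabricatorPolicies::POLICY_USER);
  $object->save();
}
```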
**Keep patches small**. Most schema change statements are not transactional. If
a patch contains several SQL statements and fails partway through, it normally
can not be rolled back. When a user tries to apply the patch again later, the
first statement (which, for example, adds a column) may fail (because the column
already exists). This can be avoided by keeping patches small (generally, one
statement per patch).
**Use namespace and character set variables**. When defining a `.sql` patch,
you should use these variables instead of hard-coding namespaces or character
set names:
| Variable | Meaning | Notes |
|---|---|---|
| `{$NAMESPACE}` | Storage Namespace | Defaults to `phabricator` |
| `{$CHARSET}` | Default Charset | Mostly used to specify table charset |
| `{$COLLATE_TEXT}` | Text Collation | For most text (case-sensitive) |
| `{$COLLATE_SORT}` | Sort Collation | For sortable text (case-insensitive) |
| `{$CHARSET_FULLTEXT}` | Fulltext Charset | Specify explicitly for fulltext |
| `{$COLLATE_FULLTEXT}` | Fulltext Collate | Specify explicitly for fulltext |
**Test your patch**. Run `bin/storage upgrade` to test your patch.
See Also
========
- @{class:LiskDAO}
diff --git a/src/docs/contributor/describing_problems.diviner b/src/docs/contributor/describing_problems.diviner
index d06d7b7d64..30f3b4cad0 100644
--- a/src/docs/contributor/describing_problems.diviner
+++ b/src/docs/contributor/describing_problems.diviner
@@ -1,159 +1,159 @@
@title Describing Root Problems
@group detail
Explains how to describe a root problem effectively.
Overview
========
We receive many feature requests with poor problem descriptions. You may have
filed such a request if you've been sent here. This document explains what we
want, and how to give us the information to help you.
We will **never** implement a feature request without first understanding the
root problem.
Good problem descriptions let us answer your questions quickly and correctly,
and suggest workarounds or alternate ways to accomplish what you want.
Poor problem descriptions require us to ask multiple clarifying questions and
do not give us enough information to suggest alternate solutions or
workarounds. We need to keep going back and forth to understand the problem
you're really facing, which means it will take a long time to get the answer
you want.
What We Want
============
We want a description of your overarching goal. The problem you started trying
to solve first, long before you decided what feature you needed.
This doesn't need to be very detailed; we just need to know what you are
ultimately hoping to accomplish.
Problem descriptions should include context and explain why you're encountering
a problem and why it's important for you to resolve it.
Here are some examples of good ways to start a problem description:
-> My company does contracting work for government agencies. Because of the
-> nature of our customers, deadlines are critical and it's very important
-> for us to keep track of where we are on a timeline. We're using Maniphest
-> to track tasks...
+(NOTE) My company does contracting work for government agencies. Because of the
+ nature of our customers, deadlines are critical and it's very important
+ for us to keep track of where we are on a timeline. We're using Maniphest
+ to track tasks...
-> I have poor eyesight, and use a screenreader to help me use software like
-> Phabricator in my job as a developer. I'm having difficulty...
+(NOTE) I have poor eyesight, and use a screenreader to help me use software like
+ Phorge in my job as a developer. I'm having difficulty...
-> We work on a large server program which has very long compile times.
-> Switching branches is a huge pain (you have to rebuild the binary after
-> every switch, which takes about 8 minutes), but we've recently begun using
-> `git worktree` to help, which has made life a lot better. However, ...
+(NOTE) We work on a large server program which has very long compile times.
+ Switching branches is a huge pain (you have to rebuild the binary after
+ every switch, which takes about 8 minutes), but we've recently begun using
+ `git worktree` to help, which has made life a lot better. However, ...
-> I triage manual test failures from our offshore QA team. Here's how our
-> workflow works...
+(NOTE) I triage manual test failures from our offshore QA team. Here's how our
+ workflow works...
All of these descriptions are helpful: they provide context about what goals
you're trying to accomplish and why.
Here are some examples of ways to start a problem description that probably
are not very good:
-> {icon times color=red} Add custom keyboard shortcuts.
+(IMPORTANT) Add custom keyboard shortcuts.
-> {icon times color=red} I have a problem: there is no way to download
-> .tar archives of repositories.
+(IMPORTANT) I have a problem: there is no way to download
+ .tar archives of repositories.
-> {icon times color=red} I want an RSS feed of my tokens. My root problem is
-> that I do not have an RSS feed of my tokens.
+(IMPORTANT) I want an RSS feed of my tokens. My root problem is
+ that I do not have an RSS feed of my tokens.
-> {icon times color=red} There is no way to see other users' email addresses.
-> That is a problem.
+(IMPORTANT) There is no way to see other users' email addresses.
+ That is a problem.
-> {icon times color=red} I've used some other software that has a cool
-> feature. Phabricator should have that feature too.
+(IMPORTANT) I've used some other software that has a cool
+ feature. Phorge should have that feature too.
These problem descriptions are not helpful. They do not describe goals or
provide context.
"5 Whys" Technique
==================
If you're having trouble understanding what we're asking for, one technique
which may help is to ask yourself "Why?" repeatedly. Each answer will usually
get you closer to describing the root problem.
For example:
> I want custom keyboard shortcuts.
This is a very poor feature request which does not describe the root problem.
It limits us to only one possible solution. Try asking "Why?" to get closer
to the root problem.
> **Why?**
> I want to add a shortcut to create a new task.
This is still very poor, but we can now think about solutions involving making
this whole flow easier, or adding a shortcut for exactly this to the upstream,
which might be a lot easier than adding custom keyboard shortcuts.
It's common to stop here and report this as your root problem. This is **not**
a root problem. This problem is only //slightly// more general than the one
we started with. Let's ask "Why?" again to get closer to the root problem.
> **Why?**
> I create a lot of very similar tasks every day.
This is still quite poor, but we can now think about solutions like a bulk task
creation flow, or maybe point you at task creation templating or prefilling or
the Conduit API or email integration or Doorkeeper.
> **Why?**
> The other developers email me issues and I copy/paste them into Maniphest.
This is getting closer, but still doesn't tell us what your goal is.
> **Why?**
> We set up email integration before, but each task needs to have specific
> projects so that didn't work and now I'm stuck doing the entry by hand.
This is in the realm of reasonable, and likely easy to solve with custom
inbound addresses and Herald rules, or with a small extension to Herald. We
might try to improve the documentation to make the feature easier to discover
or understand.
You could (and should) go even further than this and explain why tasks need to
be tagged with specific projects. It's very easy to provide more context and
can only improve the speed and quality of our response.
Note that this solution (Herald rules on inbound email) has nothing to do with
the narrow feature request (keyboard shortcuts) that you otherwise arrived at,
but there's no possible way we can suggest a solution involving email
integration or Herald if your report doesn't even mention that part of the
context.
Additional Resources
====================
Poor problem descriptions are a common issue in software development and
extensively documented elsewhere. Here are some additional resources describing
how to describe problems and ask questions effectively:
- [[ http://www.catb.org/esr/faqs/smart-questions.html | How To Ask
Questions The Smart Way ]], by Eric S. Raymond
- [[ http://xyproblem.info | XY Problem ]]
- [[ https://en.wikipedia.org/wiki/5_Whys | 5 Whys Technique ]]
Asking good questions and describing problems clearly is an important,
fundamental communication skill that software professionals should cultivate.
Next Steps
==========
Continue by:
- returning to @{article:Contributing Feature Requests}.
diff --git a/src/docs/contributor/developer_setup.diviner b/src/docs/contributor/developer_setup.diviner
index 95508ccd19..8c39c5fc3d 100644
--- a/src/docs/contributor/developer_setup.diviner
+++ b/src/docs/contributor/developer_setup.diviner
@@ -1,112 +1,112 @@
@title Developer Setup
@group developer
-How to configure a Phabricator development environment.
+How to configure a Phorge development environment.
Overview
========
There are some options and workflows that may be useful if you are developing
-or debugging Phabricator.
+or debugging Phorge.
Configuration
=============
-To adjust Phabricator for development:
+To adjust Phorge for development:
- Enable `phabricator.developer-mode` to enable some options and show
more debugging information.
- Enable `phabricator.show-prototypes` to show all the incomplete
applications.
- See @{article:Using DarkConsole} for instructions on enabling the
debugging console.
Error Handling
==============
Errors normally go to DarkConsole (if enabled) and the webserver error log,
which is often located somewhere like `/var/log/apache/error_log`. This file
often contains relevant information after you encounter an error.
When debugging, you can print information to the error log with `phlog(...)`.
You can `phlog(new Exception(...))` to get a stack trace.
You can print information to the UI with `throw new Exception(...)`,
`print_r(...)`, or `var_dump(...)`.
You can abort execution with `die(...)` if you want to make sure execution
does not make it past some point. Normally `throw` does this too, but callers
can `catch` exceptions; they can not catch `die(...)`.
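For example (a minimal sketch):

```lang=php
// Write a message and a full stack trace to the webserver error log.
phlog('Fell through to the fallback branch.');
phlog(new Exception('Unexpected state.'));

// Stop execution unconditionally while debugging.
die('Stopping here.');
```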
Utilities
=========
After adding, renaming, or moving classes, run `arc liberate` to rebuild
the class map:
```
-phabricator/ $ arc liberate
+phorge/ $ arc liberate
```
-Until you do this, Phabricator won't recognize your new, moved, or renamed
+Until you do this, Phorge won't recognize your new, moved, or renamed
classes. You do not need to run this after modifying an existing class.
After any modifications to static resources (CSS / JS) but before sending
changes for review or pushing them to the remote, run `bin/celerity map`:
```
-phabricator/ $ ./bin/celerity map
+phorge/ $ ./bin/celerity map
```
This rebuilds the static resource map.
If you forget to run these commands you'll normally be warned by unit tests,
but knowing about them may prevent confusion before you hit the warnings.
Command Line
============
Almost every script supports a `--trace` flag, which prints out service
calls and more detailed error information. This is often the best way to get
started with debugging command-line scripts.
Performance
===========
Although it is more user-focused than developer-focused, the
@{article:Troubleshooting Performance Problems} guide has useful information
on the tools available for diagnosing and understanding performance problems.
Custom Domains
==============
If you're working with applications that support custom domains (like Phurl or
Phame) you can normally test them by adding more entries to your webserver
configuration that look exactly like the primary entry (or expanding the
primary entry to match more domains).
-Phabricator routes all requests based on host headers, so alternate domains
+Phorge routes all requests based on host headers, so alternate domains
do not normally need any kind of special configuration.
You may also need to add `/etc/hosts` entries for the domains themselves.
Creating Test Data
==================
You can create test objects with the "Lipsum" utility:
```
-phabricator/ $ ./bin/lipsum help generate
-phabricator/ $ ./bin/lipsum generate ...
+phorge/ $ ./bin/lipsum help generate
+phorge/ $ ./bin/lipsum generate ...
```
Test data can make your local install feel a little more realistic. With
`--quickly`, you can generate a large amount of test data to help test issues
with performance or scale.
diff --git a/src/docs/contributor/feature_requests.diviner b/src/docs/contributor/feature_requests.diviner
index 20fe4b2d30..b2ca702cec 100644
--- a/src/docs/contributor/feature_requests.diviner
+++ b/src/docs/contributor/feature_requests.diviner
@@ -1,4 +1,211 @@
@title Contributing Feature Requests
@group detail
-Effective June 1, 2021: Phabricator is no longer actively maintained, and there is no way to file a feature request.
+Describes how to file an effective Phorge feature request.
+
+Overview
+========
+
+Phorge is an open-source project and welcomes feature requests from the
+community at large. However, there are some guidelines we ask you to follow.
+
+This article describes how to file an effective feature request.
+
+The most important things to do are:
+
+ - understand the upstream;
+ - make sure your feature makes sense in the project;
+ - align your expectations around timelines and priorities;
+ - describe your problem, not your solution.
+
+The rest of this article walks through these points in detail.
+
+If you have a bug report (not a feature request), see
+@{article:Contributing Bug Reports} for a more tailored guide.
+
+For general information on contributing to Phorge, see
+@{article:Contributor Introduction}.
+
+
+Understanding the Upstream
+==========================
+
+Before filing a feature request, it may be useful to understand how the
+upstream operates.
+
+Phorge has a designated core team who controls the project and roadmap.
+We have a cohesive vision for the project in the long term, and a general
+roadmap that extends for years into the future. While the specifics of how
+we get there are flexible, many major milestones are well-established.
+
+Although we set project direction, the community is also a critical part of
+Phorge. We aren't all-knowing, and we rely on feedback to help us identify
+issues, guide product direction, prioritize changes, and suggest features.
+
+Feature requests are an important part of this, but we ultimately build only
+features which make sense as part of the long term plan.
+
+Since it's hard to absorb a detailed understanding of that vision, //describing
+a problem// is often more effective than //requesting a feature//. We have the
+context to develop solutions which fit into our plans, address similar use
+cases, make sense with the available infrastructure, and work within the
+boundaries of our product vision. For more details on this, see below.
+
+
+Target Audiences
+================
+
+Some feature requests support very unusual use cases. Although we are broadly
+inclusive of many different kinds of users and use cases, we are not trying
+to make the software all things to all users. Use cases which are far afield
+from the things the majority of users do with Phorge often face substantial
+barriers.
+
+Phorge is primarily targeted at software projects and organizations with
+a heavy software focus. We are most likely to design, build, and prioritize
+features which serve these organizations and projects.
+
+Phorge is primarily targeted at software professionals and other
+professionals with adjacent responsibilities (like project management and
+operations). Particularly, we assume users are proficient computer users and
+familiar with software development concepts. We are most likely to design, build
+and prioritize features which serve these users.
+
+Phorge is primarily targeted at professionals working in teams on full-time
+projects. Particularly, we assume most users will use the software regularly and
+are often willing to spend a little more time up front to get a more efficient
+workflow in the long run. We are most likely to design, build and prioritize
+features which serve these use cases.
+
+Phorge is not limited to these kinds of organizations, users and use cases,
+but features which are aimed at a different group of users (like students,
+casual projects, or inexperienced computer users) may be harder to get
+upstreamed. Features aimed at very different groups of users (like wedding
+planners, book clubs, or dogs) will be much harder to get upstreamed.
+
+In many cases, a feature makes something better for all users. For example,
+suppose we fixed an issue where colorblind users had difficulty doing something.
+Dogs would benefit the most, but colorblind human users would also benefit, and
+no one would be worse off. If the benefit for core users is very small these
+kinds of features may be hard to prioritize, but there is no exceptional barrier
+to getting them upstreamed.
+
+In other cases, a feature makes something better for some users and worse for
+other users. These kinds of features face a high barrier if they make the
+software better at planning weddings and worse at reviewing code.
+
+
+Setting Expectations
+====================
+
+We have a lot of users and a small team. Even if your feature is something we're
+interested in and a good fit for where we want the product to go, it may take
+us a long time to get around to building it.
+
+Our long-term roadmap (which we call our
+[[ https://we.phorge.it/w/starmap/ | Starmap ]]) has many years worth
+of work. Your feature request is competing against thousands of other requests
+for priority.
+
+In general, we try to prioritize work that will have the greatest impact on the
+most users. Many feature requests are perfectly reasonable requests, but have
+very little impact, impact only a few users, and/or are complex to develop and
+support relative to their impact. It can take us a long time to get to these.
+
+Even if your feature request is simple and has substantial impact for a large
+number of users, the size of the request queue means that it is mathematically
+unlikely to be near the top.
+
+As a whole, this means that the overwhelming majority of feature requests will
+sit in queue for a long time without any updates, and that we won't be able to
+give you any updates or predictions about timelines. One day, out of nowhere,
+your feature will materialize. That day may be a decade from now. You should
+have realistic expectations about this when filing a feature request.
+
+
+Describe Problems
+=================
+
+When you file a feature request, we need you to describe the problem you're
+facing first, not just your desired solution. Describing the problem you are
+facing is the **most important part** of a feature request.
+
+Often, your problem may have a lot in common with other similar problems. If we
+understand your use case we can compare it to other use cases and sometimes find
+a more powerful or more general solution which solves several problems at once.
+
+At other times, we'll have a planned solution to the problem that might be
+different from your desired solution but accomplish the same goal. Understanding
+the root issue can let us merge and contextualize things.
+
+Sometimes there's already a way to solve your problem that might just not be
+obvious.
+
+Finally, your proposed solution may not be compatible with the direction we
+want to take the product, but we may be able to come up with another solution
+which has approximately the same effect and does fit into the product direction.
+
+If you only describe the solution and not the problem, we can't generalize,
+contextualize, merge, reframe, or offer alternative solutions or workarounds.
+
+You must describe the problem you are facing when filing a feature request. We
+will not accept feature requests which do not contextualize the request by
+describing the root problem.
+
+If you aren't sure exactly what we're after when we ask you to describe a root
+problem, you can find examples and more discussion in
+@{article:Describing Root Problems}.
+
+
+Hypotheticals
+=============
+
+We sometimes receive hypothetical feature requests about anticipated problems
+or concerns which haven't actually occurred yet. We usually can't do much about
+these until the problems actually occur, since the context required to
+understand and properly fix the root issue won't exist.
+
+One situation where this happens is when installs are thinking about adopting
+Phorge and trying to guess what problems users might encounter during the
+transition. More generally, this includes any request like "if users do **X**,
+they might find **Y** confusing", where no actual users have encountered
+confusion yet.
+
+These requests are necessarily missing important context, maybe including the
+answers to questions like these:
+
+ - Why did users do **X**?
+ - What were they trying to do?
+ - What did they expect to happen?
+ - How often do users do this?
+
+The answers to these questions are important in establishing that the issue is
+really a problem, figuring out the best solution for it, and prioritizing the
+issue relative to other issues.
+
+Without knowing this information, we can't be confident that we've found a good
+solution to the problem, can't know if we've actually fixed the problem, and
+can't even know if the issue was really a problem in the first place (some
+hypothetical requests describe problems which no users ever encounter).
+
+We usually can't move forward without this information. In particular, we don't
+want to spend time solving hypothetical problems which no real users will ever
+encounter: the value of those changes is zero (or negative, by making the
+product more complex without providing a benefit), but they consume development
+time which could be better spent building much more valuable features.
+
+Generally, you should wait until a problem actually occurs before filing a
+request about it.
+
+
+Next Steps
+==========
+
+Continue by:
+
+ - learning about @{article:Contributing Bug Reports}; or
+ - reading general support information in @{article:Support Resources}; or
+ - returning to the @{article:Contributor Introduction}.
diff --git a/src/docs/contributor/general_coding_standards.diviner b/src/docs/contributor/general_coding_standards.diviner
index 9b151312fd..5127aebbfc 100644
--- a/src/docs/contributor/general_coding_standards.diviner
+++ b/src/docs/contributor/general_coding_standards.diviner
@@ -1,148 +1,148 @@
@title General Coding Standards
@group standards
-This document is a general coding standard for contributing to Phabricator,
+This document is a general coding standard for contributing to Phorge,
Arcanist, and Diviner.
= Overview =
This document contains practices and guidelines which apply across languages.
Contributors should follow these guidelines. These guidelines are not
hard-and-fast but should be followed unless there is a compelling reason to
deviate from them.
= Code Complexity =
- Prefer to write simple code which is easy to understand. The simplest code
is not necessarily the smallest, and some changes which make code larger
(such as decomposing complex expressions and choosing more descriptive
names) may also make it simpler. Be willing to make size tradeoffs in favor
of simplicity.
- Prefer simple methods and functions which take a small number of parameters.
Avoid methods and functions which are long and complex, or take an
innumerable host of parameters. When possible, decompose monolithic, complex
methods into several focused, simpler ones.
- Avoid putting many ideas on a single line of code.
For example, avoid this kind of code:
COUNTEREXAMPLE
$category_map = array_combine(
$dates,
array_map(create_function('$z', 'return date("F Y", $z);'), $dates));
Expressing this complex transformation more simply produces more readable code:
$category_map = array();
foreach ($dates as $date) {
$category_map[$date] = date('F Y', $date);
}
And, obviously, don't do this sort of thing:
COUNTEREXAMPLE
if ($val = $some->complicatedConstruct() && !!~blarg_blarg_blarg() & $flags
? HOPE_YOU_MEMORIZED == $all_the_lexical_binding_powers : <<<'Q'
${hahaha}
Q
);
= Performance =
- Prefer to write efficient code.
- Strongly prefer to drive optimization decisions with hard data. Avoid
optimizing based on intuition or rumor if you can not support it with
concrete measurements.
- Prefer to optimize code which is slow and runs often. Optimizing code which
is fast and runs rarely is usually a waste of time, and can even be harmful
if it makes that code more difficult to understand or maintain. You can
determine if code is fast or slow by measuring it.
- Reject performance discussions that aren't rooted in concrete data.
-In Phabricator, you can usually use the builtin XHProf profiling to quickly
+In Phorge, you can usually use the builtin XHProf profiling to quickly
gather concrete performance data.
= Naming Things =
- Follow language-specific conventions.
- Name things unambiguously.
- Choose descriptive names.
- Avoid nonstandard abbreviations (common abbreviations like ID, URI and HTTP
are fine).
- Spell words correctly.
- Use correct grammar.
For example, avoid these sorts of naming choices:
COUNTEREXAMPLE
$PIE->GET_FLAVOR(); // Unconventional.
$thing->doStuff(); // Ambiguous.
$list->empty(); // Ambiguous -- is it isEmpty() or makeEmpty()?
$e = 3; // Not descriptive.
$this->updtHndlr(); // Nonstandard abbreviation.
$this->chackSpulls(); // Misspelling, ungrammatical.
Prefer these:
$pie->getFlavor(); // Conventional.
$pie->bake(); // Unambiguous.
$list->isEmpty(); // Unambiguous.
$list->makeEmpty(); // Unambiguous.
$edge_count = 3; // Descriptive.
$this->updateHandler(); // No nonstandard abbreviations.
$this->getID(); // Standard abbreviation.
$this->checkSpelling(); // Correct spelling and grammar.
= Error Handling =
- Strongly prefer to detect errors.
- Strongly prefer to fail fast and loudly. The maximum cost of script
termination is known, bounded, and fairly small. The maximum cost of
continuing script execution when errors have occurred is unknown and
unbounded. This also makes APIs much easier to use and problems far easier
to debug.
When you ignore errors, defer error handling, or degrade the severity of errors
by treating them as warnings and then dismissing them, you risk dangerous
behavior which may be difficult to troubleshoot:
COUNTEREXAMPLE
exec('echo '.$data.' > file.bak'); // Bad!
do_something_dangerous();
exec('echo '.$data.' > file.bak', $out, $err); // Also bad!
if ($err) {
debug_rlog("Unable to copy file!");
}
do_something_dangerous();
Instead, fail loudly:
exec('echo '.$data.' > file.bak', $out, $err); // Better
if ($err) {
throw new Exception("Unable to copy file!");
}
do_something_dangerous();
But the best approach is to use or write an API which simplifies condition
handling and makes it easier to get right than wrong:
execx('echo %s > file.bak', $data); // Good
do_something_dangerous();
Filesystem::writeFile('file.bak', $data); // Best
do_something_dangerous();
See @{article@arcanist:Command Execution} for details on the APIs used in this
example.
= Documentation, Comments and Formatting =
- Prefer to remove code by deleting it over removing it by commenting it out.
It shall live forever in source control, and can be retrieved therefrom if
it is ever again called upon.
- In source code, use only ASCII printable characters plus space and linefeed.
Do not use UTF-8 or other multibyte encodings.
diff --git a/src/docs/tech/handles.diviner b/src/docs/contributor/handles.diviner
similarity index 97%
rename from src/docs/tech/handles.diviner
rename to src/docs/contributor/handles.diviner
index 18e33dc133..ffdd0f0705 100644
--- a/src/docs/tech/handles.diviner
+++ b/src/docs/contributor/handles.diviner
@@ -1,101 +1,101 @@
@title Handles Technical Documentation
-@group handles
+@group developer
Technical overview of Handles.
Overview
========
-Most objects in Phabricator have PHIDs, which are globally unique identifiers
+Most objects in Phorge have PHIDs, which are globally unique identifiers
that look like `PHID-USER-2zw4hwdt4i5b5ypikv6x`. If you know the PHID for an
object, you can load a **handle** for that object to get more information
about it.
Handles are lightweight reference objects which provide some basic information
common across all objects (like their type, icons, names, monograms, URIs, and
whether they are open or closed). Applications don't need to know anything about
other types of objects in order to load and use handles. There are uniform
mechanisms available to load and work with handles which work across all types
of objects in every application.
Loading Handles
===============
To load handles, you'll usually call `loadHandles(...)` on the viewer:
$handles = $viewer->loadHandles($phids);
This returns a @{class:PhabricatorHandleList}. This object behaves like an
array, and you can access handle objects by using their PHIDs as indexes:
$handle = $handles[$phid];
Handles will always load, even if the PHID is invalid or the object it
identifies is restricted or broken. In these cases, the handle will accurately
represent the state of the associated object. This means that you generally do
not need to check if a handle loaded.
Rendering Handles
=================
After loading handles, you'll usually call `renderHandle($phid)` to render a
link to an object:
$view = $handles->renderHandle($phid);
This returns a @{class:PHUIHandleView}. The class exposes some methods which
can adjust how the handle renders.
If you want to render a list of handles, you can use `renderList()`:
$list_view = $handles->renderList();
This returns a @{class:PHUIHandleListView}. This class also exposes some
methods to adjust how the list renders.
Convenience methods for these operations are also available on the viewer
object itself:
$view = $viewer->renderHandle($phid);
$list_view = $viewer->renderHandleList($phids);
When you only need to render a handle once, these methods make it easier.
Fetch Semantics
===============
When you load and render handles through the viewer, the actual data fetching
occurs just-in-time. Specifically, all of the required PHIDs are queued up
until a concrete representation //needs// to be produced. Handles are then bulk
loaded.
This means that, unlike most other types of data fetching, it's OK to
single-fetch handles, because they won't //really// single-fetch. This code is
correct and desirable:
$list->addProperty(pht('Pilot'), $viewer->renderHandle($pilot_phid));
$list->addProperty(pht('Copilot'), $viewer->renderHandle($copilot_phid));
If you're rendering a very large number of handles (for example, 100+ handles
in a result list view) it's //slightly// more efficient to render them through
a @{class:PhabricatorHandleList}:
$handles = $viewer->loadHandles($phids);
foreach ($items as $item) {
// ...
$view = $handles->renderHandle($item->getPHID());
// ...
}
This shaves off a tiny bit of internal bookkeeping overhead. This does not
change the underlying semantics of the data fetch.
Handles are particularly well suited to use this just-in-time fetch pattern
because they're ubiquitous and code essentially never makes decisions based on
handles, so it's very rare that they need to be made concrete until final page
rendering. Most other kinds of data do not have the same sort of
application-level semantics. This generally makes other objects much less
suitable to be fetched just-in-time.
diff --git a/src/docs/contributor/internationalization.diviner b/src/docs/contributor/internationalization.diviner
index 99c35e675e..84fe2d2591 100644
--- a/src/docs/contributor/internationalization.diviner
+++ b/src/docs/contributor/internationalization.diviner
@@ -1,381 +1,381 @@
@title Internationalization
@group developer
-Describes Phabricator translation and localization.
+Describes Phorge translation and localization.
Overview
========
-Phabricator partially supports internationalization, but many of the tools
+Phorge partially supports internationalization, but many of the tools
are missing or in a prototype state.
This document describes what tools exist today, how to add new translations,
and how to use the translation tools to make a codebase translatable.
Adding a New Locale
===================
To add a new locale, subclass @{class:PhutilLocale}. This allows you to
introduce a new locale, like "German" or "Klingon".
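A rough sketch of what such a subclass might look like (the locale code is
invented here; check an existing @{class:PhutilLocale} subclass for the
authoritative method list):

```lang=php
final class PhutilKlingonLocale extends PhutilLocale {

  public function getLocaleCode() {
    // Hypothetical locale code.
    return 'tlh_KL';
  }

  public function getLocaleName() {
    return pht('Klingon');
  }

}
```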
Once you've created a locale, applications can add translations for that
locale.
For instructions on adding new classes, see
-@{article@phabcontrib:Adding New Classes}.
+@{article@contrib:Adding New Classes}.
Adding Translations to Locale
=============================
To translate strings, subclass @{class:PhutilTranslation}. Translations need
to belong to a locale: the locale defines an available language, and each
translation subclass provides strings for it.
Translations are separated from locales so that third-party applications can
provide translations into different locales without needing to define those
locales themselves.
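A translation class for the locale above might be sketched like this (the
class name and strings are hypothetical; see an existing
@{class:PhutilTranslation} subclass for the exact method signatures):

```lang=php
final class PhutilKlingonTranslation extends PhutilTranslation {

  public function getLocaleCode() {
    return 'tlh_KL';
  }

  protected function getTranslations() {
    return array(
      'Search' => 'nej',
    );
  }

}
```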
For instructions on adding new classes, see
-@{article@phabcontrib:Adding New Classes}.
+@{article@contrib:Adding New Classes}.
Writing Translatable Code
=========================
Strings are marked for translation with @{function@arcanist:pht}.
The `pht()` function takes a string (and possibly some parameters) and returns
the translated version of that string in the current viewer's locale, if a
translation is available.
If text strings will ultimately be read by humans, they should essentially
always be wrapped in `pht()`. For example:
```lang=php
$dialog->appendParagraph(pht('This is an example.'));
```
This allows the code to return the correct Spanish or German or Russian
-version of the text, if the viewer is using Phabricator in one of those
+version of the text, if the viewer is using Phorge in one of those
languages and a translation is available.
Using `pht()` properly so that strings are translatable can be tricky. Briefly,
the major rules are:
- Only pass static strings as the first parameter to `pht()`.
- Use parameters to create strings containing user names, object names, etc.
- Translate full sentences, not sentence fragments.
- Let the translation framework handle plural rules.
- Use @{class@arcanist:PhutilNumber} for numbers.
- Let the translation framework handle subject gender rules.
- Translate all human-readable text, even exceptions and error messages.
See the next few sections for details on these rules.
Use Static Strings
==================
The first parameter to `pht()` must always be a static string. Broadly, this
means it should not contain variables or function or method calls (it's OK to
split it across multiple lines and concatenate the parts together).
These are good:
```lang=php
pht('The night is dark.');
pht(
'Two roads diverged in a yellow wood, '.
'and sorry I could not travel both '.
'and be one traveler, long I stood.');
```
These won't work (they might appear to work, but are wrong):
```lang=php, counterexample
pht(some_function());
pht('The duck says, '.$quack);
pht($string);
```
The first argument must be a static string so it can be extracted by static
analysis tools and dumped in a big file for translators. If it contains
functions or variables, it can't be extracted, so translators won't be able to
translate it.
Lint will warn you about problems with use of static strings in calls to
`pht()`.
Parameters
==========
You can provide parameters to a translation string by using `sprintf()`-style
patterns in the input string. For example:
```lang=php
pht('%s earned an award.', $actor);
pht('%s closed %s.', $actor, $task);
```
This is primarily appropriate for usernames, object names, counts, and
untranslatable strings like URIs or instructions to run commands from the CLI.
Parameters normally should not be used to combine two pieces of translated
text: see the next section for guidance.
Sentence Fragments
==================
You should almost always pass the largest block of text to `pht()` that you
can. Particularly, it's important to pass complete sentences, not try to build
a translation by stringing together sentence fragments.
There are several reasons for this:
- It gives translators more context, so they can be more confident they are
producing a satisfying, natural-sounding translation which will make sense
and sound good to native speakers.
- In some languages, one fragment may need to translate differently depending
on what the other fragment says.
- In some languages, the most natural-sounding translation may change the
order of words in the sentence.
For example, suppose we want to translate these sentences to give the user some
instructions about how to use an interface:
> Turn the switch to the right.
> Turn the switch to the left.
> Turn the dial to the right.
> Turn the dial to the left.
Maybe we have a function like this:
```
function get_string($is_switch, $is_right) {
// ...
}
```
One way to write the function body would be like this:
```lang=php, counterexample
$what = $is_switch ? pht('switch') : pht('dial');
$dir = $is_right ? pht('right') : pht('left');
return pht('Turn the ').$what.pht(' to the ').$dir.pht('.');
```
This will work fine in English, but won't work well in other languages.
One problem with doing this is handling gendered nouns. Languages like Spanish
have gendered nouns, where some nouns are "masculine" and others are
"feminine". The gender of a noun affects which article (in English, the word
"the" is an article) should be used with it.
In English, we say "**the** knob" and "**the** switch", but a Spanish speaker
would say "**la** perilla" and "**el** interruptor", because the noun for
"knob" in Spanish is feminine (so it is used with the article "la") while the
noun for "switch" is masculine (so it is used with the article "el").
A Spanish speaker can not translate the string "Turn the" correctly without
knowing which gender the noun has. Spanish has //two// translations for this
string ("Gira el", "Gira la"), and the form depends on which noun is being
used.
Another problem is that this reduces flexibility. Translating fragments like
this locks translators into a specific word order, when rearranging the words
might make the sentence sound much more natural to a native speaker.
For example, if the string read "The knob, to the right, turn it.", it
would technically be English and most English readers would understand the
meaning, but no native English speaker would speak or write like this.
However, some languages have different subject-verb order rules or
colloquialisms, and a word order which transliterates like this may sound more
natural to a native speaker. By translating fragments instead of complete
sentences, you lock translators into English word order.
Finally, the last fragment is just a period. If a translator is presented with
this string in an interface without much context, they have no hope of guessing
how it is used in the software (it could be an end-of-sentence marker, or a
decimal point, or a date separator, or a currency separator, all of which have
very different translations in many locales). It will also conflict with all
other translations of the same string in the codebase, so even if they are
given context they can't translate it without technical problems.
To avoid these issues, provide complete sentences for translation. This almost
always takes the form of writing out alternatives in full. This is a good way
to implement the example function:
```lang=php
if ($is_switch) {
if ($is_right) {
return pht('Turn the switch to the right.');
} else {
return pht('Turn the switch to the left.');
}
} else {
if ($is_right) {
return pht('Turn the dial to the right.');
} else {
return pht('Turn the dial to the left.');
}
}
```
Although this is more verbose, translators can now get genders correct,
rearrange word order, and have far more context when translating. This enables
better, natural-sounding translations which are more satisfying to native
speakers.
Singular and Plural
===================
Different languages have various rules for plural nouns.
In English there are usually two plural noun forms: for one thing, and any
other number of things. For example, we say that one chair is a "chair" and any
other number of chairs are "chairs": "0 chairs", "1 chair", "2 chairs", etc.
In other languages, there are different (and, in some cases, more) plural
forms. For example, in Czech, there are separate forms for "one", "several",
and "many".
Because plural noun rules depend on the language, you should not write code
which hard-codes English rules. For example, this won't translate well:
```lang=php, counterexample
if ($count == 1) {
return pht('This will take an hour.');
} else {
return pht('This will take hours.');
}
```
This code is hard-coding the English rule for plural nouns. In languages like
Czech, the correct word for "hours" may be different if the count is 2 or 15,
but a translator won't be able to provide the correct translation if the string
is written like this.
Instead, pass a generic string to the translation engine which //includes// the
number of objects, and let it handle plural nouns. This is the correct way to
write the translation:
```lang=php
return pht('This will take %s hour(s).', new PhutilNumber($count));
```
If you now load the web UI, you'll see "hour(s)" literally in the UI. To fix
this so the translation sounds better in English, provide translations for this
-string in the @{class@phabricator:PhabricatorUSEnglishTranslation} file:
+string in the @{class:PhabricatorUSEnglishTranslation} file:
```lang=php
'This will take %s hour(s).' => array(
'This will take an hour.',
'This will take hours.',
),
```
The string will then sound natural in English, but non-English translators will
also be able to produce a natural translation.
Note that the translations don't actually include the number in this case. The
number is being passed from the code, but that just lets the translation engine
get the rules right: the number does not need to appear in the final
translations shown to the user.
Using PhutilNumber
==================
When translating numbers, you should almost always use `%s` and wrap the count
or number in `new PhutilNumber($count)`. For example:
```lang=php
pht('You have %s experience point(s).', new PhutilNumber($xp));
```
This will let the translation engine handle plural noun rules correctly, and
also format large numbers correctly in a locale-aware way with proper unit and
decimal separators (for example, `1000000` may be printed as "1,000,000",
with commas for readability).
The exception to this rule is IDs which should not be written with unit
separators. For example, this is correct for an object ID:
```lang=php
pht('This diff has ID %d.', $diff->getID());
```
Male and Female
===============
Different languages also use different words for talking about subjects who are
male, female or have an unknown gender. In English this is mostly just
pronouns (like "he" and "she") but there are more complex rules in other
languages, and languages like Czech also require verb agreement.
When a parameter refers to a gendered person, pass an object which implements
@{interface@arcanist:PhutilPerson} to `pht()` so translators can provide
gendered translation variants.
```lang=php
pht('%s wrote', $actor);
```
Translators will create these translations:
```lang=php
// English translation
'%s wrote';
// Czech translation
array('%s napsal', '%s napsala');
```
(You usually don't need to worry very much about this rule, it is difficult to
get wrong in standard code.)
Exceptions and Errors
=====================
You should translate all human-readable text, even exceptions and error
messages. This is primarily a rule of convenience which is straightforward
and easy to follow, not a technical rule.
Some exceptions and error messages don't //technically// need to be translated,
as they will never be shown to a user, but many exceptions and error messages
are (or will become) user-facing in some way. When writing a message, there is
often no clear and objective way to determine which type of message you are
writing. Rather than try to distinguish which are which, we simply translate
all human-readable text. This rule is unambiguous and easy to follow.
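For example, exception text goes through @{function:pht} like any other
human-readable string. A minimal sketch (the message and the `$locale`
variable are just illustrative):
```lang=php
// Translate exception messages just like other human-readable text.
throw new Exception(
  pht(
    'Expected a valid locale code, got "%s".',
    $locale));
```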
In cases where similar error or exception text is often repeated, it is
probably appropriate to define an exception for that category of error rather
than write the text out repeatedly, anyway. Two examples are
@{class@arcanist:PhutilInvalidStateException} and
@{class@arcanist:PhutilMethodNotImplementedException}, which mostly exist to
produce a consistent message about a common error state in a convenient way.
There are a handful of error strings in the codebase which may be used before
the translation framework is loaded, or may be used during handling other
errors, possibly raised from within the translation framework. This handful
of special cases are left untranslated to prevent fatals and cycles in the
error handler.
Next Steps
==========
Continue by:
- adding a new locale or translation file with
- @{article@phabcontrib:Adding New Classes}.
+ @{article@contrib:Adding New Classes}.
diff --git a/src/docs/contributor/javascript_coding_standards.diviner b/src/docs/contributor/javascript_coding_standards.diviner
index 3b47a566a6..39103e94ec 100644
--- a/src/docs/contributor/javascript_coding_standards.diviner
+++ b/src/docs/contributor/javascript_coding_standards.diviner
@@ -1,139 +1,139 @@
@title Javascript Coding Standards
@group standards
-This document describes Javascript coding standards for Phabricator and Javelin.
+This document describes Javascript coding standards for Phorge and Javelin.
= Overview =
This document outlines technical and style guidelines which are followed in
-Phabricator and Javelin. Contributors should also follow these guidelines. Many
+Phorge and Javelin. Contributors should also follow these guidelines. Many
of these guidelines are automatically enforced by lint.
These guidelines are essentially identical to the Facebook guidelines, since I
basically copy-pasted them. If you are already familiar with the Facebook
guidelines, you can probably get away with skimming this document.
= Spaces, Linebreaks and Indentation =
- Use two spaces for indentation. Don't use literal tab characters.
- Use Unix linebreaks ("\n"), not MSDOS ("\r\n") or OS9 ("\r").
- Put a space after control keywords like `if` and `for`.
- Put a space after commas in argument lists.
- Put space around operators like `=`, `<`, etc.
- Don't put spaces after function names.
- Parentheses should hug their contents.
- Generally, prefer to wrap code at 80 columns.
= Case and Capitalization =
The Javascript language unambiguously dictates casing/naming rules; follow those
rules.
- Name variables using `lowercase_with_underscores`.
- Name classes using `UpperCamelCase`.
- Name methods and properties using `lowerCamelCase`.
- Name global functions using `lowerCamelCase`. Avoid defining global
functions.
- Name constants using `UPPERCASE`.
- Write `true`, `false`, and `null` in lowercase.
- "Internal" methods and properties should be prefixed with an underscore.
For more information about what "internal" means, see
**Leading Underscores**, below.
= Comments =
- Strongly prefer `//` comments for making comments inside the bodies of
functions and methods (this lets someone easily comment out a block of code
while debugging later).
= Javascript Language =
- Use `[]` and `{}`, not `new Array` and `new Object`.
- When creating an object literal, do not quote keys unless required.
= Examples =
**if/else:**
lang=js
if (x > 3) {
// ...
} else if (x === null) {
// ...
} else {
// ...
}
You should always put braces around the body of an if clause, even if it is only
one line. Note that operators like `>` and `===` are also surrounded by
spaces.
**for (iteration):**
lang=js
for (var ii = 0; ii < 10; ii++) {
// ...
}
Prefer ii, jj, kk, etc., as iterators, since they're easier to pick out
visually and react better to "Find Next..." in editors.
**for (enumeration):**
lang=js
for (var k in obj) {
// ...
}
Make sure you use enumeration only on Objects, not on Arrays. For more details,
see @{article:Javascript Object and Array}.
**switch:**
lang=js
switch (x) {
case 1:
// ...
break;
case 2:
if (flag) {
break;
}
break;
default:
// ...
break;
}
`break` statements should be indented to block level. If you don't push them
in, you end up with an inconsistent rule for conditional `break` statements,
as in the `2` case.
If you insist on having a "fall through" case that does not end with `break`,
make it clear in a comment that you wrote this intentionally. For instance:
lang=js
switch (x) {
case 1:
// ...
// Fall through...
case 2:
//...
break;
}
= Leading Underscores =
By convention, method names which start with a leading underscore are
considered "internal", which (roughly) means "private". The critical difference
is that this is treated as a signal to Javascript processing scripts that a
symbol is safe to rename since it is not referenced outside the current file.
The upshot here is:
- name internal methods which shouldn't be called outside of a file's scope
with a leading underscore; and
- **never** call an internal method from another file.
If you treat them as though they were "private", you won't run into problems.
diff --git a/src/docs/contributor/n_plus_one.diviner b/src/docs/contributor/n_plus_one.diviner
index 6d259671a1..21bac266e7 100644
--- a/src/docs/contributor/n_plus_one.diviner
+++ b/src/docs/contributor/n_plus_one.diviner
@@ -1,77 +1,77 @@
@title Performance: N+1 Query Problem
@group developer
How to avoid a common performance pitfall.
= Overview =
The N+1 query problem is a common performance antipattern. It looks like this:
COUNTEREXAMPLE
$cats = load_cats();
foreach ($cats as $cat) {
$cats_hats = load_hats_for_cat($cat);
// ...
}
Assuming `load_cats()` has an implementation that boils down to:
SELECT * FROM cat WHERE ...
..and `load_hats_for_cat($cat)` has an implementation something like this:
SELECT * FROM hat WHERE catID = ...
..you will issue "N+1" queries when the code executes, where N is the number of
cats:
SELECT * FROM cat WHERE ...
SELECT * FROM hat WHERE catID = 1
SELECT * FROM hat WHERE catID = 2
SELECT * FROM hat WHERE catID = 3
SELECT * FROM hat WHERE catID = 4
SELECT * FROM hat WHERE catID = 5
...
The problem with this is that each query has quite a bit of overhead. **It is
//much faster// to issue 1 query which returns 100 results than to issue 100
queries which each return 1 result.** This is particularly true if your database
is on a different machine which is, say, 1-2ms away on the network. In this
case, issuing 100 queries serially has a minimum cost of 100-200ms, even if they
can be satisfied instantly by MySQL. This is far higher than the entire
-server-side generation cost for most Phabricator pages should be.
+server-side generation cost for most Phorge pages should be.
= Batching Queries =
Fix the N+1 query problem by batching queries. Load all your data before
iterating through it (this is oversimplified and omits error checking):
$cats = load_cats();
$hats = load_all_hats_for_these_cats($cats);
foreach ($cats as $cat) {
$cats_hats = $hats[$cat->getID()];
}
That is, issue these queries:
SELECT * FROM cat WHERE ...
SELECT * FROM hat WHERE catID IN (1, 2, 3, 4, 5, ...)
In this case, the total number of queries issued is always 2, no matter how many
objects there are. You've removed the "N" part from the page's query plan, and
are no longer paying the overhead of issuing hundreds of extra queries. This
will perform much better (although, as with all performance changes, you should
verify this claim by measuring it).
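As an illustration, the batched loader above could be implemented by collecting
IDs first and grouping the results afterward. This is only a sketch under the
same simplifications as the example; `load_hats_by_cat_ids()` is a hypothetical
helper which issues the single `IN (...)` query:
  function load_all_hats_for_these_cats(array $cats) {
    // Collect every cat ID up front.
    $ids = mpull($cats, 'getID');
    // Hypothetical helper: one query, "SELECT * FROM hat WHERE catID IN (...)".
    $hats = load_hats_by_cat_ids($ids);
    // Group the hats by cat ID so callers can look them up per cat.
    return mgroup($hats, 'getCatID');
  }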
See also the @{method:LiskDAO::loadRelatives} method, which provides an
abstraction to prevent this problem.
= Detecting the Problem =
Beyond reasoning about it while figuring out how to load the data you need, the
easiest way to detect this issue is to check the "Services" tab in DarkConsole
(see @{article:Using DarkConsole}), which lists all the service calls made on a
page. If you see a bunch of similar queries, this often indicates an N+1 query
issue (or a similar kind of query batching problem). Restructuring code so you
can run a single query to fetch all the data at once will always improve the
performance of the page.
diff --git a/src/docs/contributor/phabricator_code_layout.diviner b/src/docs/contributor/phorge_code_layout.diviner
similarity index 78%
rename from src/docs/contributor/phabricator_code_layout.diviner
rename to src/docs/contributor/phorge_code_layout.diviner
index 422f228a27..fee99ed897 100644
--- a/src/docs/contributor/phabricator_code_layout.diviner
+++ b/src/docs/contributor/phorge_code_layout.diviner
@@ -1,111 +1,111 @@
-@title Phabricator Code Layout
+@title Phorge Code Layout
@group developer
-Guide to Phabricator code layout, including how URI mapping works through
+Guide to Phorge code layout, including how URI mapping works through
application class and subdirectory organization best practices.
= URI Mapping =
-When a user visits a Phabricator URI, the Phabricator infrastructure parses
-that URI with a regular expression to determine what controller class to load.
+When a user visits a Phorge URI, the Phorge infrastructure parses that URI
+with a regular expression to determine what controller class to load.
-The Phabricator infrastructure knows where a given controller class lives on
+The Phorge infrastructure knows where a given controller class lives on
disk from a cache file the Arcanist phutil mapper generates. This mapping
should be updated whenever new classes or files are added:
- arc liberate /path/to/phabricator/src
+ arc liberate /path/to/phorge/src
Finally, a given controller class will map to an application which will have
most of its code in standardized subdirectories and classes.
= Best Practice Class and Subdirectory Organization =
Suppose you were working on the application `Derp`.
- phabricator/src/applications/derp/
+ phorge/src/applications/derp/
If `Derp` were as simple as possible, it would have one subdirectory:
- phabricator/src/applications/derp/controller/
+ phorge/src/applications/derp/controller/
containing the file `DerpController.php` with the class
- `DerpController`: minimally implements a `processRequest()` method
which returns some @{class:AphrontResponse} object. The class would probably
extend @{class:PhabricatorController}.
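A minimal controller might look something like the following sketch (the
response class chosen here is arbitrary; any @{class:AphrontResponse} subclass
works):
  lang=php
  final class DerpController extends PhabricatorController {
    public function processRequest() {
      // Return any AphrontResponse subclass; plain text keeps the sketch small.
      $response = new AphrontPlainTextResponse();
      $response->setContent(pht('Derp!'));
      return $response;
    }
  }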
If `Derp` were (relatively) complex, one could reasonably expect to see
the following directory layout:
- phabricator/src/applications/derp/conduit/
- phabricator/src/applications/derp/constants/
- phabricator/src/applications/derp/controller/
- phabricator/src/applications/derp/editor/
- phabricator/src/applications/derp/exception/
- phabricator/src/applications/derp/query/
- phabricator/src/applications/derp/replyhandler/
- phabricator/src/applications/derp/storage/
- phabricator/src/applications/derp/view/
+ phorge/src/applications/derp/conduit/
+ phorge/src/applications/derp/constants/
+ phorge/src/applications/derp/controller/
+ phorge/src/applications/derp/editor/
+ phorge/src/applications/derp/exception/
+ phorge/src/applications/derp/query/
+ phorge/src/applications/derp/replyhandler/
+ phorge/src/applications/derp/storage/
+ phorge/src/applications/derp/view/
(The following two folders are also likely to be included for JavaScript and
CSS respectively. However, static resources are largely outside the scope of
this document. See @{article:Adding New CSS and JS}.)
- phabricator/webroot/rsrc/js/application/derp/
- phabricator/webroot/rsrc/css/application/derp/
+ phorge/webroot/rsrc/js/application/derp/
+ phorge/webroot/rsrc/css/application/derp/
-These directories under `phabricator/src/applications/derp/` represent
-the basic set of class types from which most Phabricator applications are
+These directories under `phorge/src/applications/derp/` represent
+the basic set of class types from which most Phorge applications are
assembled. Each would contain a class file. For `Derp`, these classes could be
something like:
- **DerpConstants**: constants used in the `Derp` application.
- **DerpController**: business logic providing functionality for a given
URI. Typically, controllers load data via Storage or Query classes, then
present the data to the user via one or more View classes.
- **DerpEditor**: business logic for workflows that change one or more
Storage objects. Editor classes are only necessary for particularly
complicated edits and should be used pragmatically versus Storage objects.
- **DerpException**: exceptions used in the `Derp` application.
- **DerpQuery**: query one or more storage objects for pertinent `Derp`
application data. @{class:PhabricatorOffsetPagedQuery} is particularly
handy for pagination and works well with @{class:AphrontPagerView}.
- **DerpReplyHandler**: business logic from any configured email interactions
users can have with the `Derp` application.
- **DerpStorage**: storage objects for the `Derp` application. Typically
there is a base class which extends @{class:PhabricatorLiskDAO} to configure
application-wide storage settings like the application (thus database) name.
Reading more about the @{class:LiskDAO} is highly recommended.
- **DerpView**: view objects for the `Derp` application. Typically these
extend @{class:AphrontView}.
- **DerpConduitAPIMethod**: provides any and all `Derp` application
functionality that is accessible over Conduit.
However, it is likely that `Derp` is even more complex, and rather than
containing one class, each directory has several classes. A typical example
happens around the CRUD of an object:
- **DerpBaseController**: typically extends @{class:PhabricatorController}
and contains any controller-specific functionality used throughout the
`Derp` application.
- **DerpDeleteController**: typically extends `DerpBaseController` and
presents a confirmation dialogue to the user about deleting a `Derp`.
- **DerpEditController**: typically extends `DerpBaseController` and
presents a form to create and edit `Derps`. Most likely uses
@{class:AphrontFormView} and various `AphrontFormXControl` classes such as
@{class:AphrontFormTextControl} to create the form.
- **DerpListController**: typically extends `DerpBaseController` and displays
a set of one or more `Derps`. Might use @{class:AphrontTableView} to create
a table of `Derps`.
- **DerpViewController**: typically extends `DerpBaseController` and displays
a single `Derp`.
Some especially awesome directories might have a `__tests__` subdirectory
containing all pertinent unit test code for the class.
= Next Steps =
- Learn about @{article:Adding New CSS and JS}; or
- learn about the @{class:LiskDAO}; or
- learn about @{article:Writing Unit Tests}; or
- learn how to contribute (see @{article:Contributor Introduction}).
diff --git a/src/docs/contributor/php_coding_standards.diviner b/src/docs/contributor/php_coding_standards.diviner
index a14acf17f2..bb54478fa3 100644
--- a/src/docs/contributor/php_coding_standards.diviner
+++ b/src/docs/contributor/php_coding_standards.diviner
@@ -1,178 +1,178 @@
@title PHP Coding Standards
@group standards
-This document describes PHP coding standards for Phabricator and related
+This document describes PHP coding standards for Phorge and related
projects (like Arcanist).
= Overview =
This document outlines technical and style guidelines which are followed in
-Phabricator and Arcanist. Contributors should also follow these guidelines.
+Phorge and Arcanist. Contributors should also follow these guidelines.
Many of these guidelines are automatically enforced by lint.
These guidelines are essentially identical to the Facebook guidelines, since I
basically copy-pasted them. If you are already familiar with the Facebook
guidelines, you probably don't need to read this super thoroughly.
= Spaces, Linebreaks and Indentation =
- Use two spaces for indentation. Don't use tab literal characters.
- Use Unix linebreaks ("\n"), not MSDOS ("\r\n") or OS9 ("\r").
- Put a space after control keywords like `if` and `for`.
- Put a space after commas in argument lists.
- Put a space around operators like `=`, `<`, etc.
- Don't put spaces after function names.
- Parentheses should hug their contents.
- Generally, prefer to wrap code at 80 columns.
= Case and Capitalization =
- Name variables and functions using `lowercase_with_underscores`.
- Name classes using `UpperCamelCase`.
- Name methods and properties using `lowerCamelCase`.
- Use uppercase for common acronyms like ID and HTML.
- Name constants using `UPPERCASE`.
- Write `true`, `false` and `null` in lowercase.
= Comments =
- Do not use "#" (shell-style) comments.
- Prefer "//" comments inside function and method bodies.
= PHP Language Style =
- Use "<?php", not the "<?" short form. Omit the closing "?>" tag.
- Prefer casts like `(string)` to casting functions like `strval()`.
- Prefer type checks like `$v === null` to type functions like
`is_null()`.
- Avoid all crazy alternate forms of language constructs like "endwhile"
and "<>".
- Always put braces around conditional and loop blocks.
= PHP Language Features =
- Use PHP as a programming language, not a templating language.
- Avoid globals.
- Avoid extract().
- Avoid eval().
- Avoid variable variables.
- Prefer classes over functions.
- Prefer class constants over defines.
- Avoid naked class properties; instead, define accessors.
- Use exceptions for error conditions.
- Use type hints, use `assert_instances_of()` for arrays holding objects.
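For example, the last point might look like this (a small sketch; `Cat` is a
hypothetical class):
  lang=php
  function feed_cats(array $cats) {
    // Throws if any element of $cats is not an instance of Cat.
    assert_instances_of($cats, 'Cat');
    foreach ($cats as $cat) {
      // ...
    }
  }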
= Examples =
**if/else:**
lang=php
if ($some_variable > 3) {
// ...
} else if ($some_variable === null) {
// ...
} else {
// ...
}
You should always put braces around the body of an if clause, even if it is only
one line long. Note spaces around operators and after control statements. Do not
use the "endif" construct, and write "else if" as two words.
**for:**
lang=php
for ($ii = 0; $ii < 10; $ii++) {
// ...
}
Prefer $ii, $jj, $kk, etc., as iterators, since they're easier to pick out
visually and react better to "Find Next..." in editors.
**foreach:**
lang=php
foreach ($map as $key => $value) {
// ...
}
**switch:**
lang=php
switch ($value) {
case 1:
// ...
break;
case 2:
if ($flag) {
// ...
break;
}
break;
default:
// ...
break;
}
`break` statements should be indented to block level.
**array literals:**
lang=php
$junk = array(
'nuts',
'bolts',
'refuse',
);
Use a trailing comma and put the closing parenthesis on a separate line so that
diffs which add elements to the array affect only one line.
**operators:**
lang=php
$a + $b; // Put spaces around operators.
$omg.$lol; // Exception: no spaces around string concatenation.
$arr[] = $element; // Couple [] with the array when appending.
$obj = new Thing(); // Always use parens.
**function/method calls:**
lang=php
// One line
eject($cargo);
// Multiline
AbstractFireFactoryFactoryEngine::promulgateConflagrationInstance(
$fuel,
$ignition_source);
**function/method definitions:**
lang=php
function example_function($base_value, $additional_value) {
return $base_value + $additional_value;
}
class C {
public static function promulgateConflagrationInstance(
IFuel $fuel,
IgnitionSource $source) {
// ...
}
}
**class:**
lang=php
class Dog extends Animal {
const CIRCLES_REQUIRED_TO_LIE_DOWN = 3;
private $favoriteFood = 'dirt';
public function getFavoriteFood() {
return $this->favoriteFood;
}
}
diff --git a/src/docs/contributor/rendering_html.diviner b/src/docs/contributor/rendering_html.diviner
index a8fe5a899d..40892b5ad7 100644
--- a/src/docs/contributor/rendering_html.diviner
+++ b/src/docs/contributor/rendering_html.diviner
@@ -1,182 +1,182 @@
@title Rendering HTML
@group developer
-Rendering HTML in the Phabricator environment.
+Rendering HTML in the Phorge environment.
= Overview =
-Phabricator attempts to prevent XSS by treating strings as default-unsafe when
+Phorge attempts to prevent XSS by treating strings as default-unsafe when
rendering. This means that if you try to build HTML through string
concatenation, it won't work: the string will be escaped by the rendering
pipeline, and the browser will treat it as plain text, not HTML.
This document describes the right way to build HTML components so they are safe
from XSS and render correctly. Broadly:
- Use @{function@arcanist:phutil_tag} (and @{function:javelin_tag}) to build
tags.
- Use @{function@arcanist:hsprintf} where @{function@arcanist:phutil_tag}
is awkward.
- Combine elements with arrays, not string concatenation.
- @{class:AphrontView} subclasses should return a
@{class@arcanist:PhutilSafeHTML} object from their `render()` method.
- @{class:AphrontView} subclasses act like tags when rendering.
- @{function:pht} has some special rules.
- There are some other things that you should be aware of.
See below for discussion.
= Building Tags: phutil_tag() =
Build HTML tags with @{function@arcanist:phutil_tag}. For example:
phutil_tag(
'div',
array(
'class' => 'some-class',
),
$content);
@{function@arcanist:phutil_tag} will properly escape the content and all the
attributes, and return a @{class@arcanist:PhutilSafeHTML} object. The rendering
pipeline knows that this object represents a properly escaped HTML tag. This
allows @{function@arcanist:phutil_tag} to render tags with other tags as
content correctly (without double-escaping):
phutil_tag(
'div',
array(),
phutil_tag(
'strong',
array(),
$content));
-In Phabricator, the @{function:javelin_tag} function is similar to
+In Phorge, the @{function:javelin_tag} function is similar to
@{function@arcanist:phutil_tag}, but provides special handling for the
`sigil` and `meta` attributes.
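For example (a sketch; the sigil and metadata values are illustrative, and the
Javelin behavior which would consume them is not shown):
  javelin_tag(
    'a',
    array(
      'href'  => '/derp/',
      'sigil' => 'derp-link',            // Handled specially by Javelin.
      'meta'  => array('derpID' => 123), // Metadata made available to behaviors.
    ),
    pht('View Derp'));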
= Building Blocks: hsprintf() =
Sometimes, @{function@arcanist:phutil_tag} can be particularly awkward to
use. You can use @{function@arcanist:hsprintf} to build larger and more
complex blocks of HTML, when @{function@arcanist:phutil_tag} is a poor fit.
@{function:hsprintf} has `sprintf()` semantics, but `%s` escapes HTML:
// Safely build fragments or unwieldy blocks.
hsprintf(
'<div id="%s">',
$div_id);
@{function:hsprintf} can be especially useful when:
- You need to build a block with a lot of tags, like a table with rows and
cells.
- You need to build part of a tag (usually you should avoid this, but if you
do need to, @{function@arcanist:phutil_tag} can not do it).
Note that it is unsafe to provide any user-controlled data to the first
parameter of @{function@arcanist:hsprintf} (the `sprintf()`-style pattern).
Like @{function@arcanist:phutil_tag}, this function returns a
@{class@arcanist:PhutilSafeHTML} object.
= Composing Tags =
When you are building a view which combines smaller components, like a section
with a header and a body:
$header = phutil_tag('h1', ...);
$body = phutil_tag('p', ...);
...you should NOT use string concatenation:
COUNTEREXAMPLE
// Not dangerous, but does the wrong thing.
phutil_tag('div', array(), $header.$body);
Instead, use an array:
// Render a tag containing other tags safely.
phutil_tag('div', array(), array($header, $body));
If you concatenate @{class@arcanist:PhutilSafeHTML} objects, they revert to
normal strings and are no longer marked as properly escaped tags.
(In the future, these objects may stop converting to strings, but for now they
must, in order to maintain backward compatibility.)
If you need to build a list of items with some element in between each of them
(like a middot, comma, or vertical bar) you can use
@{function:phutil_implode_html}:
// Render links with commas between them.
phutil_tag(
'div',
array(),
phutil_implode_html(', ', $list_of_links));
= AphrontView Classes =
-Subclasses of @{class:AphrontView} in Phabricator should return a
+Subclasses of @{class:AphrontView} in Phorge should return a
@{class@arcanist:PhutilSafeHTML} object. The easiest way to do this is to
return `phutil_tag()` or `javelin_tag()`:
return phutil_tag('div', ...);
You can use an @{class:AphrontView} subclass like you would a tag:
phutil_tag('div', array(), $view);
= Internationalization: pht() =
The @{function:pht} function has some special rules. If any input to
@{function:pht} is a @{class@arcanist:PhutilSafeHTML} object, @{function:pht}
returns a @{class@arcanist:PhutilSafeHTML} object itself. Otherwise, it returns
normal text.
This is generally safe because translations are not permitted to have more tags
than the original text did (so if the original text had no tags, translations
can not add any).
Normally, this just means that @{function:pht} does the right thing and behaves
like you would expect, but it is worth being aware of.
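For example, passing a tag as a parameter keeps the result safe (a sketch;
`$uri` and `$name` are placeholders):
  // Because one input is a PhutilSafeHTML object, the result is safe HTML.
  pht(
    'View the %s documentation for details.',
    phutil_tag('a', array('href' => $uri), $name));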
= Special Cases =
NOTE: This section describes dangerous methods which can bypass XSS protections.
If possible, do not use them.
You can build @{class@arcanist:PhutilSafeHTML} out of a string explicitly by
calling @{function:phutil_safe_html} on it. This is **dangerous**, because if
you are wrong and the string is not actually safe, you have introduced an XSS
vulnerability. Consequently, you should avoid calling this if possible.
You can use @{function@arcanist:phutil_escape_html_newlines} to escape HTML
while converting newlines to `<br />`. You should not need to explicitly use
@{function@arcanist:phutil_escape_html} anywhere.
If you need to apply a string function (such as `trim()`) to safe HTML, use
@{method@arcanist:PhutilSafeHTML::applyFunction}.
If you need to extract the content of a @{class@arcanist:PhutilSafeHTML}
object, you should call `getHTMLContent()`, not cast it to a string. Eventually,
we would like to remove the string cast entirely.
Functions @{function@arcanist:phutil_tag} and @{function@arcanist:hsprintf}
are not safe if you pass user input as the tag or attribute name. All the
following examples are dangerous:
counterexample
phutil_tag($evil);
phutil_tag('span', array($evil => $evil2));
phutil_tag('span', array('onmouseover' => $evil));
// Use PhutilURI to check if $evil is valid HTTP link.
hsprintf('<a href="%s">', $evil);
hsprintf('<%s>%s</%s>', $evil, $evil2, $evil);
// We have a lint rule disallowing this.
hsprintf($evil);
diff --git a/src/docs/contributor/reproduction_steps.diviner b/src/docs/contributor/reproduction_steps.diviner
index 1050e43c48..4a8d916efc 100644
--- a/src/docs/contributor/reproduction_steps.diviner
+++ b/src/docs/contributor/reproduction_steps.diviner
@@ -1,252 +1,218 @@
@title Providing Reproduction Steps
@group detail
Describes how to provide reproduction steps.
Overview
========
-When you submit a bug report about Phabricator, you **MUST** include
+When you submit a bug report about Phorge, you **MUST** include
reproduction steps. We can not help you with bugs we can not reproduce, and
will not accept reports which omit reproduction steps or have incomplete or
insufficient instructions.
This document explains what we're looking for in good reproduction steps.
Briefly:
- Reproduction steps must allow us to reproduce the problem locally on a
- clean, up-to-date install of Phabricator.
+ clean, up-to-date install of Phorge.
- Reproduction should be as simple as possible.
Good reproduction steps can take time to write out clearly, simplify, and
verify. As a reporter, we expect you to shoulder as much of this burden as you
can, to make it easy for us to reproduce and resolve bugs.
We do not have the resources to pursue reports with limited, inaccurate, or
incomplete reproduction steps, and will not accept them. These reports require
large amounts of our time and are often fruitless.
Example Reproduction Steps
==========================
Here's an example of what good reproduction steps might look like:
---
Reproduction steps:
- Click "Create Event" in Calendar.
- Fill in the required fields with any text (name, description, etc).
- Set an invalid time for one of the dates, like the meaningless string
"Tea Time". This is not valid, so we're expecting an error when we
submit the form.
- Click "Create" to save the event.
Expected result:
- Form reloads with an error message about the specific mistake.
- All field values are retained.
Actual result:
- Form reloads with an error message about the specific mistake.
- Most values are discarded, so I have to re-type the name, description, etc.
---
These steps are **complete** and **self-contained**: anyone can reproduce the
issue by following these steps. These steps are **clear** and **easy to
follow**. These steps are also simple and minimal: they don't include any
extra unnecessary steps.
Finally, these steps explain what the reporter expected to happen, what they
observed, and how those behaviors differ. This isn't as important when the
observation is an obvious error like an exception, but can be important if a
behavior is merely odd or ambiguous.
Reliable Reproduction
=====================
When you file a bug report, the first thing we do to fix it is to try to
-reproduce the problem locally on an up-to-date install of Phabricator. We will
+reproduce the problem locally on an up-to-date install of Phorge. We will
do this by following the steps you provide. If we can recreate the issue
locally, we can almost always resolve the problem (often very promptly).
However, many reports do not have enough information, are missing important
steps, or rely on data (like commits, users, other projects, permission
settings, feed stories, etc) that we don't have access to. We either can't
follow these steps, or can't reproduce issues by following them.
Reproduction steps must be complete and self-contained, and must allow
-**anyone** to reproduce the issue on a new, empty install of Phabricator. If
+**anyone** to reproduce the issue on a new, empty install of Phorge. If
the bug you're seeing depends on data or configuration which would not be
present on a new install, you need to include that information in your steps.
For example, if you're seeing an issue which depends on a particular policy
setting or configuration setting, you need to include instructions for creating
the policy or adjusting the setting in your steps.
Getting Started
===============
To generate reproduction steps, first find a sequence of actions which
reproduce the issue you're seeing reliably.
Next, write down everything you did as clearly as possible. Make sure each step
is self-contained: anyone should be able to follow your steps, without access
to private or proprietary data.
Now, to verify that your steps provide a complete, self-contained way to
reproduce the issue, follow them yourself on a new, empty, up-to-date instance
-of Phabricator.
-
-If you can't easily start an empty instance locally, you can launch an empty
-instance on Phacility in about 60 seconds (see the "Resources" section for
-details).
+of Phorge.
If you can follow your steps and reproduce the issue on a clean instance,
we'll probably be able to follow them and reproduce the issue ourselves.
If you can't follow your steps because they depend on information which is not
available on a clean instance (for example, a certain config setting), rewrite
them to include instructions to create that information (for example, adjusting
the config to the problematic value).
If you follow your instructions but the issue does not reproduce, the issue
might already be fixed. Make sure your install is up to date.
If your install is up to date and the issue still doesn't reproduce on a clean
install, your reproduction steps are missing important information. You need to
figure out what key element differs between your install and the clean install.
Once you have working reproduction steps, your steps may have details which
aren't actually necessary to reproduce the issue. You may be able to simplify
them by removing some steps or describing steps more narrowly. For help, see
"Simplifying Steps" below.
-Resources
-=========
-
-We provide some resources which can make it easier to start building steps, or
-to simplify steps.
-
-**Phacility Test Instances**: You can launch a new, up-to-date instance of
-Phabricator on Phacility in about a minute at <https://admin.phacility.com>.
-These instances run `stable`.
-
-You can use these instances to make sure that issues haven't already been
-fixed, that they reproduce on a clean install, or that your steps are really
-complete, and that the root cause isn't custom code or local extensions. Using
-a test instance will avoid disrupting other users on your install.
-
-**Test Repositories**: There are several test repositories on
-`secure.phabricator.com` which you can push commits to if you need to build
-an example to demonstrate a problem.
-
-For example, if you're seeing an issue with a certain filename but the commit
-where the problem occurs is in a proprietary internal repository, push a commit
-that affects a file with a similar name to a test repository, then reproduce
-against the test data. This will allow you to generate steps which anyone can
-follow.
-
-
Simplifying Steps
=================
If you aren't sure how to simplify reproduction steps, this section has some
advice.
In general, you'll simplify reproduction steps by first finding a scenario
where the issue reproduces reliably (a "bad" case) and a second, similar
situation where it does not reproduce (a "good" case). Once you have a "bad"
case and a "good" case, you'll change the scenarios step-by-step to be more
similar to each other, until you have two scenarios which differ only a very
small amount. This remaining difference usually points clearly at the root
cause of the issue.
For example, suppose you notice that you get an error if you commit a file
named `A Banana.doc`, but not if you commit a file named `readme.md`. In
this case, `A Banana.doc` is your "bad" case and `readme.md` is your "good"
case.
There are several differences between these file names, and any of them might
be causing the problem. To narrow this down, you can try making the scenarios
more similar. For example, do these files work?
- `A_Banana.doc` - Problem with spaces?
- `A Banana.md` - File extension issue?
- `A Ban.doc` - Path length issue?
- `a banana.doc` - Issue with letter case?
Some of these probably work, while others might not. These could lead you to a
smaller case which reproduces the problem, which might be something like this:
- Files like `a b`, which contain spaces, do not work.
- Files like `a.doc`, which have the `.doc` extension, do not work.
- Files like `AAAAAAAAAA`, which have more than 9 letters, do not work.
- Files like `A`, which have uppercase letters, do not work.
With a simpler reproduction scenario, you can simplify your steps to be more
tailored and minimal. This will help us pinpoint the issue more quickly and
be more certain that we've understood and resolved it.
It is more important that steps be complete than that they be simple, and it's
acceptable to submit complex instructions if you have difficulty simplifying
them, so long as they are complete, self-contained, and accurately reproduce
the issue.
Things to Avoid
===============
These are common mistakes when providing reproduction instructions:
**Insufficient Information**: The most common issue we see is instructions
which do not have enough information: they are missing critical details which
are necessary in order to follow them on a clean install.
**Inaccurate Steps**: The second most common issue we see is instructions
which do not actually reproduce a problem when followed on a clean, up-to-date
install. Verify your steps by testing them.
**Private Information**: Some users provide reports which hinge on the
particulars of private commits in proprietary repositories we can not access.
This is not useful, because we can not examine the underlying commit to figure
out why it is causing an issue.
Instead, reproduce the issue in a public repository. There are several test
repositories available which you can push commits to in order to construct a
reproduction case.
**Screenshots**: Screenshots can be helpful to explain a set of steps or show
what you're seeing, but they usually aren't sufficient on their own because
they don't contain all of the information we need to reproduce the problem.
For example, a screenshot may show a particular policy or object, but not have
enough information for us to rebuild a similar object locally.
Alternatives
============
If you have an issue which you can't build reproduction steps for, or which
only reproduces in your environment, or which you don't want to narrow down
to a minimal reproduction case, we can't accept it as a bug report. These
issues are tremendously time consuming for us to pursue and rarely benefit
more than one install.
-If the issue is important but falls outside the scope of permissible bug
-reports, we're happy to provide more tailored support at consulting rates. See
-[[ https://secure.phabricator.com/w/consulting/ | Consulting ]] for details.
-
Next Steps
==========
Continue by:
- returning to @{article:Contributing Bug Reports}.
diff --git a/src/docs/contributor/running_builtin_php_webserver.diviner b/src/docs/contributor/running_builtin_php_webserver.diviner
index 1e4bdacb5c..b511731dc1 100644
--- a/src/docs/contributor/running_builtin_php_webserver.diviner
+++ b/src/docs/contributor/running_builtin_php_webserver.diviner
@@ -1,9 +1,9 @@
@title Running built-in PHP webserver
@group developer
As of version 5.4.0, the PHP command line interface provides a built-in web
server. This web server is designed for developmental purposes only, and should
-not be used in production. Phabricator can be executed under it with the
+not be used in production. Phorge can be executed under it with the
command:
- $ php -S localhost:8000 -t path/to/phabricator/webroot/ path/to/phabricator/webroot/index.php
+ $ php -S localhost:8000 -t path/to/phorge/webroot/ path/to/phorge/webroot/index.php
diff --git a/src/docs/contributor/unit_tests.diviner b/src/docs/contributor/unit_tests.diviner
index 7977a4a876..35cee09566 100644
--- a/src/docs/contributor/unit_tests.diviner
+++ b/src/docs/contributor/unit_tests.diviner
@@ -1,86 +1,86 @@
@title Writing Unit Tests
@group developer
-Simple guide to Arcanist and Phabricator unit tests.
+Simple guide to Arcanist and Phorge unit tests.
= Overview =
-Arcanist and Phabricator provide and use a simple unit test framework. This
+Arcanist and Phorge provide and use a simple unit test framework. This
document is aimed at project contributors and describes how to use it to add
and run tests in these projects or other libphutil libraries.
In the general case, you can integrate `arc` with a custom unit test engine
(like PHPUnit or any other unit testing library) to run tests in other projects.
See @{article:Arcanist User Guide: Customizing Lint, Unit Tests and Workflows}
for information on customizing engines.
= Adding Tests =
-To add new tests to a Arcanist or Phabricator module:
+To add new tests to an Arcanist or Phorge module:
- Create a `__tests__/` directory in the module if it doesn't exist yet.
- Add classes to the `__tests__/` directory which extend from
- @{class:PhabricatorTestCase} (in Phabricator) or
+ @{class:PhabricatorTestCase} (in Phorge) or
@{class@arcanist:PhutilTestCase} (elsewhere).
- Run `arc liberate` on the library root so your classes are loadable.
= Running Tests =
Once you've added test classes, you can run them with:
- `arc unit path/to/module/`, to explicitly run module tests.
- `arc unit`, to run tests for all modules affected by changes in the
working copy.
- `arc diff` will also run `arc unit` for you.
= Example Test Case =
Here's a simple example test:
lang=php
- class PhabricatorTrivialTestCase extends PhabricatorTestCase {
+ class PhorgeTrivialTestCase extends PhabricatorTestCase {
private $two;
public function willRunOneTest($test_name) {
// You can execute setup steps which will run before each test in this
// method.
$this->two = 2;
}
public function testAllIsRightWithTheWorld() {
$this->assertEqual(4, $this->two + $this->two, '2 + 2 = 4');
}
}
You can see this class at @{class:PhabricatorTrivialTestCase} and run it with:
- phabricator/ $ arc unit src/infrastructure/testing/testcase/
+ phorge/ $ arc unit src/infrastructure/testing/testcase/
PASS <1ms* testAllIsRightWithTheWorld
For more information on writing tests, see
@{class@arcanist:PhutilTestCase} and @{class:PhabricatorTestCase}.
= Database Isolation =
-By default, Phabricator isolates unit tests from the database. It makes a crude
+By default, Phorge isolates unit tests from the database. It makes a crude
effort to simulate some side effects (principally, ID assignment on insert), but
any queries which read data will fail to select any rows and throw an exception
about isolation. In general, isolation is good, but this can make certain types
of tests difficult to write. When you encounter issues, you can deal with them
in a number of ways. From best to worst:
- Encounter no issues; your tests are fast and isolated.
- Add more simulated side effects if you encounter minor issues and simulation
is reasonable.
- Build a real database simulation layer (fairly complex).
- Disable isolation for a single test by using
`LiskDAO::endIsolateAllLiskEffectsToCurrentProcess();` before your test
and `LiskDAO::beginIsolateAllLiskEffectsToCurrentProcess();` after your
test. This will disable isolation for one test. NOT RECOMMENDED.
- Disable isolation for your entire test case by overriding
`getPhabricatorTestCaseConfiguration()` and providing
`self::PHABRICATOR_TESTCONFIG_ISOLATE_LISK => false` in the configuration
dictionary you return. This will disable isolation entirely. STRONGLY NOT
RECOMMENDED.
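For reference, the override described in the last item might look something
like this (a sketch; again, strongly not recommended):
  lang=php
  protected function getPhabricatorTestCaseConfiguration() {
    return array(
      // Disables Lisk isolation for every test in this case.
      self::PHABRICATOR_TESTCONFIG_ISOLATE_LISK => false,
    ) + parent::getPhabricatorTestCaseConfiguration();
  }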
diff --git a/src/docs/contributor/using_edges.diviner b/src/docs/contributor/using_edges.diviner
index 89d07f4c2f..132d728019 100644
--- a/src/docs/contributor/using_edges.diviner
+++ b/src/docs/contributor/using_edges.diviner
@@ -1,31 +1,30 @@
@title Using Edges
@group developer
Guide to the Edges infrastructure.
= Overview =
Edges are a generic way of storing a relationship between two objects (like a
-Task and its attached files). If you are familiar with the Facebook associations
-framework, Phabricator Edges are substantially similar.
+Task and its attached files).
An edge is defined by a source PHID (the edge origin), a destination PHID
(the edge destination) and an edge type (which describes the relationship,
like "is subscribed to" or "has attached file").
Every edge is directional, and stored alongside the source object. Some edges
are configured to automatically write an inverse edge, effectively building
a bidirectional relationship. The strength of storing relationships like this
is that they work when databases are partitioned or sharded.
= Reading Edges =
You can load edges with @{class:PhabricatorEdgeQuery}.
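A common convenience pattern looks roughly like this (a sketch; the edge type
constant is illustrative):
  lang=php
  // Load the PHIDs of all files attached to a task.
  $file_phids = PhabricatorEdgeQuery::loadDestinationPHIDs(
    $task->getPHID(),
    PhabricatorObjectHasFileEdgeType::EDGECONST);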
= Writing Edges =
You can edit edges with @{class:PhabricatorEdgeEditor}.
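For example (a sketch; the objects and edge type are placeholders):
  lang=php
  // Attach a file to a task by writing an edge (and any configured inverse).
  id(new PhabricatorEdgeEditor())
    ->addEdge(
      $task->getPHID(),
      PhabricatorObjectHasFileEdgeType::EDGECONST,
      $file->getPHID())
    ->save();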
= Edges and Lisk =
@{class:PhabricatorLiskDAO} includes some builtin support for edges.
diff --git a/src/docs/contributor/using_oauthserver.diviner b/src/docs/contributor/using_oauthserver.diviner
index b40496d7cf..3e30065fd1 100644
--- a/src/docs/contributor/using_oauthserver.diviner
+++ b/src/docs/contributor/using_oauthserver.diviner
@@ -1,116 +1,116 @@
-@title Using the Phabricator OAuth Server
+@title Using the Phorge OAuth Server
@group developer
-How to use the Phabricator OAuth Server.
+How to use the Phorge OAuth Server.
= Overview =
-Phabricator includes an OAuth Server which supports the
+Phorge includes an OAuth Server which supports the
`Authorization Code Grant` flow as described in the OAuth 2.0
specification:
http://tools.ietf.org/html/draft-ietf-oauth-v2-23
This functionality can allow clients to integrate with a given
-Phabricator instance in a secure way with granular data access.
-For example, Phabricator can be used as a central identity store for any
+Phorge instance in a secure way with granular data access.
+For example, Phorge can be used as a central identity store for any
clients that implement OAuth 2.0.
= Vocabulary =
- **Access token** - a token which allows a client to ask for data on behalf
of a resource owner. A given client will only be able to access data included
in the scope(s) the resource owner authorized that client for.
- **Authorization code** - a short-lived code which allows an authenticated
client to ask for an access token on behalf of some resource owner.
- **Client** - this is the application or system asking for data from the
OAuth Server on behalf of the resource owner.
- **Resource owner** - this is the user the client and OAuth Server are
concerned with on a given request.
- **Scope** - this defines a specific piece of granular data a client can
or can not access on behalf of a user. For example, if authorized for the
"whoami" scope on behalf of a given resource owner, the client can get the
results of Conduit.whoami for that resource owner when authenticated with
a valid access token.
= Setup - Creating a Client =
# Visit {nav Your Local Install > OAuth Server > Create Application}
# Fill out the form
# Profit
= Obtaining an Authorization Code =
-POST or GET `https://phabricator.example.com/oauthserver/auth/` with the
+POST or GET `https://phorge.example.com/oauthserver/auth/` with the
following parameters:
- Required - **client_id** - the id of the newly registered client.
- Required - **response_type** - the desired type of authorization code
response. Only code is supported at this time.
- Optional - **redirect_uri** - override the redirect_uri the client
registered. This redirect_uri must have the same fully-qualified domain,
path, port and have at least the same query parameters as the redirect_uri
the client registered, as well as have no fragments.
- Optional - **scope** - specify what scope(s) the client needs access to
in a space-delimited list.
- Optional - **state** - an opaque value the client can send to the server
for programmatic excellence. Some clients use this value to implement XSRF
protection or for debugging purposes.
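Putting these together, an authorization request might look something like this
(all parameter values are placeholders):
```https://phorge.example.com/oauthserver/auth/?client_id=<client_id>&response_type=code&scope=whoami&state=<opaque_value>```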
If done correctly and the resource owner has not yet authorized the client
for the desired scope, then the resource owner will be presented with an
interface to authorize the client for the desired scope. The OAuth Server
will redirect to the pertinent redirect_uri with an authorization code or
an error indicating that the resource owner did not authorize the client,
depending on the outcome.
If done correctly and the resource owner has already authorized the client for
the desired scope, then the OAuth Server will redirect to the pertinent
redirect_uri with a valid authorization code.
If there is an error, the OAuth Server will return a descriptive error
message. This error will be presented to the resource owner on the
-Phabricator domain if there is reason to believe there is something fishy
+Phorge domain if there is reason to believe there is something fishy
with the client. For example, if there is an issue with the redirect_uri.
Otherwise, the OAuth Server will redirect to the pertinent redirect_uri
and include the pertinent error information.
= Obtaining an Access Token =
-POST or GET `https://phabricator.example.com/oauthserver/token/`
+POST or GET `https://phorge.example.com/oauthserver/token/`
with the following parameters:
- Required - **client_id** - the id of the client
- Required - **client_secret** - the secret of the client.
This is used to authenticate the client.
- Required - **code** - the authorization code obtained earlier.
- Required - **grant_type** - the desired type of access grant.
Only token is supported at this time.
- Optional - **redirect_uri** - should be the exact same redirect_uri as
the redirect_uri specified to obtain the authorization code. If no
redirect_uri was specified to obtain the authorization code then this
should not be specified.
If done correctly, the OAuth Server will redirect to the pertinent
redirect_uri with an access token.
If there is an error, the OAuth Server will return a descriptive error
message.
= Using an Access Token =
Simply include a query parameter with the key "access_token" and the
previously obtained access token as the value. For example:
-```https://phabricator.example.com/api/user.whoami?access_token=ykc7ly7vtibj334oga4fnfbuvnwz4ocp```
+```https://phorge.example.com/api/user.whoami?access_token=ykc7ly7vtibj334oga4fnfbuvnwz4ocp```
If the token has expired or is otherwise invalid, the client will receive
an error indicating as such. In these cases, the client should re-initiate
the entire `Authorization Code Grant` flow.
NOTE: See "Scopes" section below for more information on what data is
currently exposed through the OAuth Server.
Scopes
======
//This section has not been written yet.//
diff --git a/src/docs/contributor/version.diviner b/src/docs/contributor/version.diviner
index 8ffafede1b..6e960c7993 100644
--- a/src/docs/contributor/version.diviner
+++ b/src/docs/contributor/version.diviner
@@ -1,80 +1,80 @@
@title Providing Version Information
@group detail
How to provide version information with reports made to the upstream.
Overview
========
When you submit a bug report, we require that you include version information.
Despite our insistence that users update before reporting issues, many reports
we receive describe issues which have already been resolved. Including version
information in your report allows us to quickly determine that you are out of
date and that updating will fix your issue.
That said, your report must also include reproduction steps, and you should be
unable to generate valid reproduction steps for an issue which has already been
resolved because valid reproduction steps must also reproduce against a clean,
up-to-date install. See @{article:Providing Reproduction Steps} for details.
-Phabricator Version
-===================
+Phorge Version
+==============
-To get Phabricator version information:
+To get Phorge version information:
- Go to the {nav Config} application. You can type "Config" into the global
search box, or navigate to `https://your.install.com/config/`. You must
be an administrator to access this application.
- Click {nav Versions} in the left menu.
- Copy and paste all of the information on the page into your report.
Arcanist Version
================
To get Arcanist version information:
- Run `arc version`.
- Copy and paste all of the output into your report.
Other Versions
==============
In general, we use `git` commit hashes as version identifiers, so you can
identify the version of something by running `git show` and copy/pasting the
hash from the output. This may be useful if you're encountering an issue which
prevents you from reaching the version reporting screen.
Running a Fork?
===============
-If you've forked Phabricator and have local commits, please make sure you are
+If you've forked Phorge and have local commits, please make sure you are
reporting upstream commit hashes, not local commit hashes. The UI will attempt
to figure out where you branched from, but it may not be able to in all cases.
If you report local commit hashes instead of upstream commit hashes we can not
go look up the commit hashes to figure out which changes they correspond to, so
we can not use that information to determine how old your install is or
which patches you are missing.
In most cases, you can find the upstream commit you've branched from like this:
```
$ git merge-base HEAD origin/master
```
Note that if you report a bug and have local commits, we will almost always ask
-you to reproduce the issue against a clean copy of Phabricator before we
+you to reproduce the issue against a clean copy of Phorge before we
continue. You can get help faster by doing this //before// reporting an issue.
Next Steps
==========
Continue by:
- returning to @{article:Contributing Bug Reports}.
diff --git a/src/docs/flavor/about_flavor_text.diviner b/src/docs/flavor/about_flavor_text.diviner
deleted file mode 100644
index 3d46ff41d0..0000000000
--- a/src/docs/flavor/about_flavor_text.diviner
+++ /dev/null
@@ -1,9 +0,0 @@
-@title About Flavor Text
-@group overview
-
-Explains what's going on here.
-
-= Overview =
-
-Flavor Text is a collection of short articles which pertain to software
-development in general, not necessarily to Phabricator specifically.
diff --git a/src/docs/flavor/project_history.diviner b/src/docs/flavor/project_history.diviner
index c3b5363d50..1159304cda 100644
--- a/src/docs/flavor/project_history.diviner
+++ b/src/docs/flavor/project_history.diviner
@@ -1,60 +1,63 @@
-@title Phabricator Project History
+@title Phorge Project History
@group lore
-A riveting tale of adventure. In this document, I refer to worldly and
-sophisticated engineer Evan Priestley as "I", which is only natural as I am he.
-
-This document is mostly just paragraph after paragraph of self-aggrandizement.
+A riveting tale of adventure.
= In The Beginning =
-I wrote the original version of Differential in one night at a Facebook
-Hackathon in April or May 2007, along with Luke Shepard. I joined the company in
-April and code review was already an established and mostly-mandatory part of
-the culture, but it happened over email and was inefficient and hard to keep
-track of. I remember feeling like I was spending a lot of time waiting for code
-review to happen, which was a major motivator for building the tool.
+Evan Priestley wrote the original version of Differential in one night at a
+Facebook Hackathon in April or May 2007, along with Luke Shepard. He joined the
+company in April and code review was already an established and mostly-mandatory
+part of the culture, but it happened over email and was inefficient and hard to
+keep track of. Evan remembers feeling like he was spending a lot of time waiting
+ for code review to happen, which was a major motivator for building the tool.
The original name of the tool was "Diffcamp". Some time earlier there had been
an attempt to create a project management tool that was a sort of hybrid between
-Trac and Basecamp called "Traccamp". Since we were writing the code review tool
-at the height of the brief popularity Traccamp enjoyed, we integrated and called
-the new tool Diffcamp even though it had no relation to Basecamp. Traccamp fell
-by the wayside shortly thereafter and was eventually removed.
+Trac and Basecamp called "Traccamp". Since they were writing the code review tool
+at the height of the brief popularity Traccamp enjoyed, Evan and Luke integrated
+ and called the new tool Diffcamp even though it had no relation to Basecamp.
+ Traccamp fell by the wayside shortly thereafter and was eventually removed.
-However, Diffcamp didn't share its fate. We spent some more time working on it
-and got good enough to win hearts and minds over emailing diffs around and was
-soon the de facto method of code review at Facebook.
+However, Diffcamp didn't share its fate. Evan and Luke spent some more time
+working on it and got good enough to win hearts and minds over emailing diffs
+around and was soon the de facto method of code review at Facebook.
= The Long Bloat =
For the next two and a half years, Diffcamp grew mostly organically and gained a
number of features like inline commenting, CLI support and git support (Facebook
was 100% SVN in early 2007 but 90%+ of Engineers worked primarily in git with
SVN bridging by 2010). As these patches were contributed pretty much randomly,
it also gained a lot of performance problems, usability issues, and bugs.
-Through 2007 and 2008 I worked mostly on frontend and support infrastructure;
-among other things, I wrote a static resource management system called Haste. In
-2009 I worked on the Facebook Lite site, where I built the Javelin Javascript
-library and an MVC-flavored framework called Alite.
+Through 2007 and 2008 Evan worked mostly on frontend and support infrastructure;
+among other things, he wrote a static resource management system called Haste.
+In 2009 Evan worked on the Facebook Lite site, where he built the Javelin
+Javascript library and an MVC-flavored framework called Alite.
But by early 2010, Diffcamp was in pretty bad shape. Two years of having random
features grafted onto it without real direction had left it slow and difficult
to use. Internal feedback on the tool was pretty negative, with a lot of
complaints about performance and stability. The internal XTools team had made
inroads at fixing these problems in late 2009, but they were stretched thin and
the tool had become a sprawling landscape of architectural and implementation
problems.
= Differential =
-I joined the new Dev Tools team around February 2010 and took over Diffcamp. I
-renamed it to Differential, moved it to a new Alite-based infrastructure with
-Javelin, and started making it somewhat less terrible. I eventually wrote
+Evan joined the new Dev Tools team around February 2010 and took over Diffcamp.
+He renamed it to Differential, moved it to a new Alite-based infrastructure with
+Javelin, and started making it somewhat less terrible. He eventually wrote
Diffusion and built Herald to replace a very difficult-to-use predecessor. These
-tools were less negatively received than the older versions. By December 2010 I
-started open sourcing them; Haste became //Celerity// and Alite became
-//Aphront//. I wrote Maniphest to track open issues with the project in January
-or February, left Facebook in April, and shortly after, we open sourced
+tools were less negatively received than the older versions. By December 2010,
+Evan started open sourcing them; Haste became //Celerity// and Alite became
+//Aphront//. He wrote Maniphest to track open issues with the project in January
+or February, left Facebook in April, and shortly after, open sourced
Phabricator.
+
+= Phork =
+In 2021, Evan announced that Phabricator was no longer maintained. A group of
+open-source contributors came together and forked it. This new group renamed
+the project "Phorge" and continues to maintain this beloved and well-used
+project.
diff --git a/src/docs/flavor/recommendations_on_branching.diviner b/src/docs/flavor/recommendations_on_branching.diviner
index 38d196ad9b..521b08d873 100644
--- a/src/docs/flavor/recommendations_on_branching.diviner
+++ b/src/docs/flavor/recommendations_on_branching.diviner
@@ -1,188 +1,188 @@
@title Recommendations on Branching
@group review
Project recommendations on how to organize branches.
This document discusses organizing branches in your remote/origin for feature
development and release management, not the use of local branches in Git or
queues or bookmarks in Mercurial.
-This document is purely advisory. Phabricator works with a variety of branching
+This document is purely advisory. Phorge works with a variety of branching
strategies, and diverging from the recommendations in this document
will not impact your ability to use it for code review and source management.
= Overview =
-This document describes a branching strategy used by Facebook and Phabricator to
+This document describes a branching strategy used by Facebook and Phorge to
develop software. It scales well and removes the pain associated with most
branching strategies. This strategy is most applicable to web applications, and
may be less applicable to other types of software. The basics are:
- Never put feature branches in the remote/origin/trunk.
- Control access to new features with runtime configuration, not branching.
The next sections describe these points in more detail, explaining why you
should consider abandoning feature branches and how to build runtime access
controls for features.
= Feature Branches =
Suppose you are developing a new feature, like a way for users to "poke" each
other. A traditional strategy is to create a branch for this feature in the
remote (say, "poke_branch"), develop the feature on the branch over some period
of time (say, a week or a month), and then merge the entire branch back into
master/default/trunk when the feature is complete.
This strategy has some drawbacks:
- You have to merge. Merging can be painful and error prone, especially if the
feature takes a long time to develop. Reducing merge pain means spending
time merging master into the branch regularly. As branches fall further
out of sync, merge pain/risk tends to increase.
- This strategy generally aggregates risk into a single high-risk merge event
at the end of development. It does this both explicitly (all the code lands
at once) and more subtly: since commits on the branch aren't going live any
time soon, it's easy to hold them to a lower bar of quality.
- When you have multiple feature branches, it's impossible to test
interactions between the features until they are merged.
- You generally can't A/B test code in feature branches, can't roll it out to
a small percentage of users, and can't easily turn it on for just employees
since it's in a separate branch.
Of course, it also has some advantages:
- If the new feature replaces an older feature, the changes can delete the
older feature outright, or at least transition from the old feature to the
new feature fairly rapidly.
- The chance that this code will impact production before the merge is nearly
zero (it normally requires substantial human error). This is the major
reason to do it at all.
Instead, consider abandoning all use of feature branching. The advantages are
straightforward:
- You don't have to do merges.
- Risk is generally spread out more evenly into a large number of very small
risks created as each commit lands.
- You can test interactions between features in development easily.
- You can A/B test and do controlled rollouts easily.
But it has some tradeoffs:
- If a new feature replaces an older feature, both have to exist in the same
codebase for a while. But even with feature branching, you generally have to
do almost all this work anyway to avoid situations where you flip a switch
and can't undo it.
- You need an effective way to control access to features so they don't launch
before they're ready. Even with this, there is a small risk a feature may
launch or leak because of a smaller human error than would be required with
feature branching. However, for most applications, this isn't a big deal.
This second point is a must-have, but entirely tractable. The next section
describes how to build it, so you can stop doing feature branching and never
deal with the pain and risk of merging again.
= Controlling Access to Features =
Controlling access to features is straightforward: build some kind of runtime
configuration which defines which features are visible, based on the tier
(e.g., development, testing or production?) code is deployed on, the logged in
user, global configuration, random buckets, A/B test groups, or whatever else.
Your code should end up looking something like this:
  if (is_feature_launched('poke')) {
    show_poke();
  }
Behind that is some code which knows about the 'poke' feature and can go lookup
configuration to determine if it should be visible or not. Facebook has a very
sophisticated system for this (called GateKeeper) which also integrates with A/B
tests, allows you to define complicated business rules, etc.
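For illustration only, a first version of such a check can be very small. In
the sketch below, the configuration format, the tier/percentage fields, and
the `get_deployment_tier()` helper are all hypothetical placeholders, not any
particular real system:

  lang=php
  <?php

  // Hypothetical sketch: a feature check backed by static configuration.
  // Unknown features default to "off"; users are bucketed stably so the
  // same user always gets the same answer for a percentage rollout.
  function is_feature_launched($feature, $user_id = null) {
    $config = array(
      'poke' => array(
        'tiers'   => array('development'),
        'percent' => 0,
      ),
    );

    if (!isset($config[$feature])) {
      return false;
    }

    $spec = $config[$feature];
    if (in_array(get_deployment_tier(), $spec['tiers'])) {
      return true;
    }

    if ($user_id !== null) {
      return (abs(crc32($feature.':'.$user_id)) % 100) < $spec['percent'];
    }

    return false;
  }

Even something this simple gives you the important properties: features
default to off, and turning one on is an explicit, reviewable change.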
You don't need this in the beginning. Before GateKeeper, Facebook used a much
simpler system (called Sitevars) to handle this. Here are some resources
describing similar systems:
- There's a high-level overview of Facebook's system in this 2011 tech talk:
[[http://techcrunch.com/2011/05/30/facebook-source-code/ | Facebook Push
Tech Talk]].
- Flickr described their similar system in a 2009 blog post here:
[[http://code.flickr.com/blog/2009/12/02/flipping-out/ | Flickr Feature
Flags and Feature Flippers]].
- Disqus described their similar system in a 2010 blog post here:
[[http://blog.disqus.com/post/789540337/partial-deployment-with-feature-switches |
Disqus Feature Switches]].
- Forrst describes their similar system in a 2010 blog post here:
[[http://blog.forrst.com/post/782356699/how-we-deploy-new-features-on-forrst |
Forrst Buckets]].
- Martin Fowler discusses these systems in a 2010 blog post here:
[[http://martinfowler.com/bliki/FeatureToggle.html |
Martin Fowler's FeatureToggle]].
- - Phabricator just adds config options but defaults them to off. When
+ - Phorge just adds config options but defaults them to off. When
developing, we turn them on locally. Once a feature is ready, we default it
on. We have vastly less context to deal with than most projects, however,
and sometimes get away with simply not linking new features in the UI until
they mature (it's open source anyway so there's no value in hiding things).
When building this system there are a few things to avoid, mostly related to not
letting the complexity of this system grow too wildly:
- Facebook initially made it very easy to turn things on to everyone by
accident in GateKeeper. Don't do this. The UI should make it obvious when
you're turning something on or off, and default to off.
- Since GateKeeper is essentially a runtime business rules engine, it was
heavily abused to effectively execute code living in a database. Avoid this
through simpler design or a policy of sanity.
- Facebook allowed GateKeeper rules to depend on other GateKeeper rules
(for example, 'new_profile_tour' is launched if 'new_profile' is launched)
but did not perform cycle detection, and then sat on a bug describing
how to introduce a cycle and bring the site down for a very long time,
until someone introduced a cycle and brought the site down. If you implement
dependencies, implement cycle detection.
- Facebook implemented some very expensive GateKeeper conditions and was
spending 100+ ms per page running complex rulesets to do launch checks for a
number of months in 2009. Keep an eye on how expensive your checks are.
That said, not all complexity is bad:
- Allowing features to have states like "3%" instead of just "on" or "off"
allows you to roll out features gradually and watch for trouble (e.g.,
services collapsing from load).
- Building a control panel where you hit "Save" and all production servers
immediately reflect the change allows you to quickly turn things off if
there are problems.
- If you perform A/B testing, integrating A/B tests with feature rollouts
is probably a natural fit.
- Allowing new features to be launched to all employees before they're
launched to the world is a great way to get feedback and keep everyone
in the loop.
Adopting runtime feature controls increases the risk of features leaking (or
even launching) before they're ready. This is generally a small risk which is
probably reasonable for most projects to accept, although it might be
unacceptable for some projects. There are some ways you can mitigate this
risk:
- Essentially every launch/leak at Facebook was because someone turned on
a feature by accident when they didn't mean to. The control panel made this
very easy: features defaulted to "on", and if you tried to do something
common like remove yourself from the test group for a feature you could
easily launch it to the whole world. Design the UI defensively so that it
is hard to turn features on to everyone and/or obvious when a feature is
launching and this shouldn't be a problem.
- The rest were through CSS or JS changes that mentioned new features being
shipped to the client as part of static resource packaging or because
the code was just added to existing files. If this is a risk you're
concerned about, consider integration with static resource management.
In general, you can start with a very simple system and expand it as it makes
sense. Even a simple system can let you move away from feature branches.
= Next Steps =
Continue by:
- reading recommendations on structuring revision control with
@{article:Recommendations on Revision Control}; or
- reading recommendations on structuring changes with
@{article:Writing Reviewable Code}.
diff --git a/src/docs/flavor/recommendations_on_revision_control.diviner b/src/docs/flavor/recommendations_on_revision_control.diviner
index 8e30e2f4f1..3e5a5a791f 100644
--- a/src/docs/flavor/recommendations_on_revision_control.diviner
+++ b/src/docs/flavor/recommendations_on_revision_control.diviner
@@ -1,92 +1,92 @@
@title Recommendations on Revision Control
@group review
Project recommendations on how to organize revision control.
-This document is purely advisory. Phabricator works with a variety of revision
+This document is purely advisory. Phorge works with a variety of revision
control strategies, and diverging from the recommendations in this document
will not impact your ability to use it for code review and source management.
-This is my (epriestley's) personal take on the issue and not necessarily
-representative of the views of the Phabricator team as a whole.
+This is Evan's personal take on the issue and not necessarily
+representative of the views of the Phorge team as a whole.
= Overview =
There are a few ways to use SVN, a few ways to use Mercurial, and many many many
ways to use Git. Particularly with Git, every project does things differently,
and all these approaches are valid for small projects. When projects scale,
strategies which enforce **one idea is one commit** are better.
= One Idea is One Commit =
Choose a strategy where **one idea is one commit** in the authoritative
master/remote version of the repository. Specifically, this means that an entire
conceptual changeset ("add a foo widget") is represented in the remote as
exactly one commit (in some form), not a sequence of checkpoint commits.
- In SVN, this means don't `commit` until after an idea has been completely
written. All reasonable SVN workflows naturally enforce this.
- In Git, this means squashing checkpoint commits as you go (with `git commit
--amend`) or before pushing (with `git rebase -i` or `git merge
--squash`), or having a strict policy where your master/trunk contains only
merge commits and each is a merge between the old master and a branch which
represents a single idea. Although this preserves the checkpoint commits
along the branches, you can view master alone as a series of single-idea
commits.
- In Mercurial, you can use the "queues" extension before 2.2 or `--amend`
after Mercurial 2.2, or wait to commit until a change is complete (like
SVN), although the latter is not recommended. Without extensions, older
versions of Mercurial do not support liberal mutability doctrines (so you
can't ever combine checkpoint commits) and do not let you build a default
out of only merge commits, so it is not possible to have an authoritative
repository where one commit represents one idea in any real sense.
= Why This Matters =
A strategy where **one idea is one commit** has no real advantage over any other
strategy until your repository hits a velocity where it becomes critical. In
particular:
- Essentially all operations against the master/remote repository are about
ideas, not commits. When one idea is many commits, everything you do is more
complicated because you need to figure out which commits represent an idea
("the foo widget is broken, what do I need to revert?") or what idea is
ultimately represented by a commit ("commit af3291029 makes no sense, what
goal is this change trying to accomplish?").
- Release engineering is greatly simplified. Release engineers can pick or
drop ideas easily when each idea corresponds to one commit. When an idea
is several commits, it becomes easier to accidentally pick or drop half of
an idea and end up in a state which is virtually guaranteed to be wrong.
- Automated testing is greatly simplified. If each idea is one commit, you
can run automated tests against every commit and test failures indicate a
serious problem. If each idea is many commits, most of those commits
represent a known broken state of the codebase (e.g., a checkpoint with a
syntax error which was fixed in the next checkpoint, or with a
half-implemented idea).
- Understanding changes is greatly simplified. You can bisect to a break and
identify the entire idea trivially, without fishing forward and backward in
the log to identify the extents of the idea. And you can be confident in
what you need to revert to remove the entire idea.
- There is no clear value in having checkpoint commits (some of which are
guaranteed to be known broken versions of the repository) persist into the
remote. Consider a theoretical VCS which automatically creates a checkpoint
commit for every keystroke. This VCS would obviously be unusable. But many
checkpoint commits aren't much different, and conceptually represent some
relatively arbitrary point in the sequence of keystrokes that went into
writing a larger idea. Get rid of them or create an abstraction layer (merge
commits) which allows you to ignore them when you are trying to understand
the repository in terms of ideas (which is almost always).
All of these become problems only at scale. Facebook pushes dozens of ideas
every day and thousands on a weekly basis, and could not do this (at least, not
without more people or more errors) without choosing a repository strategy where
**one idea is one commit**.
= Next Steps =
Continue by:
- reading recommendations on structuring branches with
@{article:Recommendations on Branching}; or
- reading recommendations on structuring changes with
@{article:Writing Reviewable Code}.
diff --git a/src/docs/flavor/so_many_databases.diviner b/src/docs/flavor/so_many_databases.diviner
index 74fb21332f..cde2c93825 100644
--- a/src/docs/flavor/so_many_databases.diviner
+++ b/src/docs/flavor/so_many_databases.diviner
@@ -1,130 +1,130 @@
-@title Why does Phabricator need so many databases?
+@title Why does Phorge need so many databases?
@group lore
-Phabricator uses about 60 databases (and we may have added more by the time you
+Phorge uses about 60 databases (and we may have added more by the time you
read this document). This sometimes comes as a surprise, since you might assume
it would only use one database.
The approach we use is designed to work at scale for huge installs with many
thousands of users. We care a lot about working well for large installs, and
about scaling up gracefully to meet the needs of growing organizations. We want
-small startups to be able to install Phabricator and have it grow with them as
+small startups to be able to install Phorge and have it grow with them as
they expand to many thousands of employees.
-A cost of this approach is that it makes Phabricator more difficult to install
+A cost of this approach is that it makes Phorge more difficult to install
on shared hosts which require a lot of work to create or authorize access to
-each database. However, Phabricator does a lot of advanced or complex things
+each database. However, Phorge does a lot of advanced or complex things
which are difficult to configure or manage on shared hosts, and we don't
recommend installing it on a shared host. The install documentation explicitly
discourages installing on shared hosts.
Broadly, in cases where we must choose between operating well at scale for
growing organizations and installing easily on shared hosts, we prioritize
operating at scale.
Listing Databases
=================
-You can get a full list of the databases Phabricator needs with `bin/storage
+You can get a full list of the databases Phorge needs with `bin/storage
databases`. It will look something like this:
```
-$ /core/lib/phabricator/bin/storage databases
+$ /core/lib/phorge/bin/storage databases
secure_audit
secure_calendar
secure_chatlog
secure_conduit
secure_countdown
secure_daemon
secure_differential
secure_draft
secure_drydock
secure_feed
...<dozens more databases>...
```
Roughly, each application has its own database, and then there are some
databases which support internal systems or shared infrastructure.
Operating at Scale
==================
This storage design is aimed at large installs that may need more than one
physical database server to handle the load the install generates.
The primary reason we use a separate database for each application is to allow
large installs to scale up by spreading database load across more hardware. A
large organization with many thousands of active users may find themselves
limited by the capacity of a single database backend.
If so, they can launch a second backend, move some applications over to it, and
continue piling on more users.
This can't continue forever, but provides a substantial amount of headroom for
large installs to spread the workload across more hardware and continue scaling
up.
To make this possible, we put each application in its own database and use
database boundaries to enforce the logical constraints that the application
must have in order for this to work. For example, we can not perform joins
between separable tables, because they may not be on the same hardware.
Establishing boundaries with application databases is a simple, straightforward
way to partition storage and make administrative operations like spreading load
realistic.
Ease of Development
===================
This design is also easier for us to work with, and easier for users who
want to work with the raw data in the database.
We have a large number of tables (more than 400) and we can not reasonably
reduce the number of tables very much (each table generally represents some
meaningful type of object in some application). It's easier to develop with
tables which are organized into separate application databases, just like it's
easier to work with a large project if you organize source files into
directories.
-If you aren't developing Phabricator and never look at the data in the
+If you aren't developing Phorge and never look at the data in the
database, you probably won't benefit from this organization. However, if you
-are a developer or want to extend Phabricator or look under the hood, it's
+are a developer or want to extend Phorge or look under the hood, it's
easier to find what you're looking for and work with the tables when they're
organized by application.
More Databases Cost Nothing
===========================
In almost all cases, creating more databases has zero cost, just like
organizing source code into directories has zero cost. Even if we didn't derive
enormous benefits from this approach at scale, there is little reason //not//
to organize storage like this.
There are a handful of administrative tasks which are very slightly more
complex to perform on multiple databases, but these are all either automated
with `bin/storage` or easy to build on top of the list of databases emitted by
`bin/storage databases`.
For example, you can dump all the databases with `bin/storage dump`, and you
can destroy all the databases with `bin/storage destroy`.
As mentioned above, an exception to this is that if you're installing on a
shared host and need to jump through hoops to individually authorize access to
each database, databases do cost something.
However, this cost is an artificial cost imposed by the selected environment,
and this is only the first of many issues you'll run into trying to install and
-run Phabricator on a shared host. These issues are why we strongly discourage
+run Phorge on a shared host. These issues are why we strongly discourage
using shared hosts, and recommend against them in the install guide.
Next Steps
==========
Continue by:
- learning more about databases in @{article:Database Schema}.
diff --git a/src/docs/flavor/soon_static_resources.diviner b/src/docs/flavor/soon_static_resources.diviner
index 96f28cfe2f..79fbc29819 100644
--- a/src/docs/flavor/soon_static_resources.diviner
+++ b/src/docs/flavor/soon_static_resources.diviner
@@ -1,126 +1,126 @@
@title Things You Should Do Soon: Static Resources
@group sundry
Over time, you'll write more JS and CSS and eventually need to put systems in
place to manage it.
This is part of @{article:Things You Should Do Soon}, which describes
architectural problems in web applications which you should begin to consider
before you encounter them.
= Manage Dependencies Automatically =
The naive way to add static resources to a page is to include them at the top
of the page, before rendering begins, by enumerating filenames. Facebook used to
work like that:
  COUNTEREXAMPLE
  <?php
  require_js('js/base.js');
  require_js('js/utils.js');
  require_js('js/ajax.js');
  require_js('js/dialog.js');
  // ...
This was okay for a while but had become unmanageable by 2007. Because
dependencies were managed completely manually and you had to explicitly list
every file you needed in the right order, everyone copy-pasted a giant block
of this stuff into every page. The major problem this created was that each page
pulled in way too much JS, which slowed down frontend performance.
We moved to a system (called //Haste//) which declared JS dependencies in the
files using a docblock-like header:
  /**
   * @provides dialog
   * @requires utils ajax base
   */
We annotated files manually, although theoretically you could use static
analysis instead (we couldn't realistically do that, our JS was pretty
unstructured). This allowed us to pull in the entire dependency chain of a
component with one call:
  require_static('dialog');
...instead of copy-pasting every dependency.
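As a rough sketch of the idea (not the actual Haste or Celerity
implementation), resolving a component's transitive requirements is just a
walk over a map built from those `@provides`/`@requires` headers:

  lang=php
  <?php

  // Illustrative sketch: $graph maps each component to the components it
  // @requires, as parsed from the docblock headers. Returns components in
  // dependency order, each at most once.
  function resolve_requires($component, array $graph, array &$seen = array()) {
    if (isset($seen[$component])) {
      return array();
    }
    $seen[$component] = true;

    $ordered = array();
    $requires = isset($graph[$component]) ? $graph[$component] : array();
    foreach ($requires as $dependency) {
      $ordered = array_merge(
        $ordered,
        resolve_requires($dependency, $graph, $seen));
    }
    $ordered[] = $component;

    return $ordered;
  }

A real system also needs to handle cycles, missing components, and packaging,
but the core bookkeeping is about this much code.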
= Include When Used =
The other part of this problem was that all the resources were required at the
top of the page instead of when they were actually used. This meant two things:
- you needed to include every resource that //could ever// appear on a page;
- if you were adding something new to 2+ pages, you had a strong incentive to
put it in base.js.
So every page pulled in a bunch of silly stuff like the CAPTCHA code (because
there was one obscure workflow involving unverified users which could
theoretically show any user a CAPTCHA on any page) and every random thing anyone
had stuck in base.js.
We moved to a system where JS and CSS tags were output **after** page rendering
had run instead (they still appeared at the top of the page, they were just
prepended rather than appended before being output to the browser -- there are
some complexities here, but they are beyond the immediate scope), so
require_static() could appear anywhere in the code. Then we moved all the
require_static() calls to be proximate to their use sites (so dialog rendering
code would pull in dialog-related CSS and JS, for example, not any page which
might need a dialog), and split base.js into a bunch of smaller files.
= Packaging =
The biggest frontend performance killer in most cases is the raw number of HTTP
requests, and the biggest hammer for addressing it is to package related JS
and CSS into larger files, so you send down all the core JS code in one big file
instead of a lot of smaller ones. Once the other groundwork is in place, this is
a relatively easy change. We started with manual package definitions and
eventually moved to automatic generation based on production data.
= Caches and Serving Content =
In the simplest implementation of static resources, you write out a raw JS tag
with something like `src="/js/base.js"`. This will break disastrously as you
scale, because clients will be running with stale versions of resources. There
are a bunch of subtle problems (especially once you have a CDN), but the big one
is that if a user is browsing your site as you push/deploy, their client will
not make requests for the resources they already have in cache, so even if your
servers respond correctly to If-None-Match (ETags) and If-Modified-Since
(Expires) the site will appear completely broken to everyone who was using it
when you push a breaking change to static resources.
The best way to solve this problem is to version your resources in the URI,
so each version of a resource has a unique URI:
rsrc/af04d14/js/base.js
When you push, users will receive pages which reference the new URI so their
browsers will retrieve it.
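One simple way to build such URIs (a sketch of the general idea, not the exact
scheme any particular system uses) is to embed a short hash of the file
contents, so the URI changes exactly when the content does:

  lang=php
  <?php

  // Sketch: version a resource URI by content hash so old and new versions
  // have different URIs and caches never need to be invalidated in place.
  function versioned_resource_uri($webroot, $path) {
    $hash = substr(md5_file($webroot.'/'.$path), 0, 7);
    return '/rsrc/'.$hash.'/'.$path;
  }

  // versioned_resource_uri('/var/www', 'js/base.js')
  //   => "/rsrc/af04d14/js/base.js", for example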
**But**, there's a big problem, once you have a bunch of web frontends:
While you're pushing, a user may make a request which is handled by a server
running the new version of the code, which delivers a page with a new resource
URI. Their browser then makes a request for the new resource, but that request
is routed to a server which has not been pushed yet, which delivers an old
version of the resource. They now have a poisoned cache: old resource data for
a new resource URI.
You can do a lot of clever things to solve this, but the solution we chose at
Facebook was to serve resources out of a database instead of off disk. Before a
push begins, new resources are written to the database so that every server is
able to satisfy both old and new resource requests.
This also made it relatively easy to do processing steps (like stripping
comments and whitespace) in one place, and just insert a minified/processed
version of CSS and JS into the database.
= Reference Implementation: Celerity =
-Some of the ideas discussed here are implemented in Phabricator's //Celerity//
+Some of the ideas discussed here are implemented in Phorge's //Celerity//
system, which is essentially a simplified version of the //Haste// system used
by Facebook.
diff --git a/src/docs/flavor/things_you_should_do_now.diviner b/src/docs/flavor/things_you_should_do_now.diviner
index 0d3b4135ba..58a5b8eaea 100644
--- a/src/docs/flavor/things_you_should_do_now.diviner
+++ b/src/docs/flavor/things_you_should_do_now.diviner
@@ -1,138 +1,138 @@
@title Things You Should Do Now
@group sundry
Describes things you should do now when building software, because the cost to
do them increases over time and eventually becomes prohibitive or impossible.
= Overview =
If you're building a hot new web startup, there are a lot of decisions to make
about what to focus on. Most things you'll build will take about the same amount
of time to build regardless of what order you build them in, but there are a few
technical things which become vastly more expensive to fix later.
If you don't do these things early in development, they'll become very hard or
impossible to do later. This is basically a list of things that would have saved
Facebook huge amounts of time and effort down the road if someone had spent
a tiny amount of time on them earlier in the development process.
See also @{article:Things You Should Do Soon} for things that scale less
drastically over time.
= Start IDs At a Gigantic Number =
If you're using integer IDs to identify data or objects, **don't** start your
IDs at 1. Start them at a huge number (e.g., 2^33) so that no object ID will
ever appear in any other role in your application (like a count, a natural
index, a byte size, a timestamp, etc). This takes about 5 seconds if you do it
before you launch and rules out a huge class of nasty bugs for all time. It
becomes incredibly difficult as soon as you have production data.
The kind of bug that this causes is accidental use of some other value as an ID:
  COUNTEREXAMPLE
  // Load the user's friends, returns a map of friend_id => true
  $friend_ids = user_get_friends($user_id);
  // Get the first 8 friends.
  $first_few_friends = array_slice($friend_ids, 0, 8);
  // Render those friends.
  render_user_friends($user_id, array_keys($first_few_friends));
Because array_slice() in PHP discards array indices and renumbers them, this
doesn't render the user's first 8 friends but the users with IDs 0 through 7,
e.g. Mark Zuckerberg (ID 4) and Dustin Moskovitz (ID 6). If you have IDs in this
range, sooner or later something that isn't an ID will get treated like an ID
and the operation will be valid and cause unexpected behavior. This is
completely avoidable if you start your IDs at a gigantic number.
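(As an aside, the immediate bug above comes from `array_slice()` renumbering
integer keys; passing `true` as its fourth `$preserve_keys` argument keeps the
real friend IDs. Gigantic IDs are still worth it, because they catch this
entire class of mistake rather than this one call site.)

  lang=php
  // Preserving keys keeps the actual friend IDs instead of 0 through 7:
  $first_few_friends = array_slice($friend_ids, 0, 8, true);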
= Only Store Valid UTF-8 =
For the most part, you can ignore UTF-8 and unicode until later. However, there
is one aspect of unicode you should address now: store only valid UTF-8 strings.
Assuming you're storing data internally as UTF-8 (this is almost certainly the
right choice and definitely the right choice if you have no idea how unicode
works), you just need to sanitize all the data coming into your application and
make sure it's valid UTF-8.
If your application emits invalid UTF-8, other systems (like browsers) will
break in unexpected and interesting ways. You will eventually be forced to
ensure you emit only valid UTF-8 to avoid these problems. If you haven't
sanitized your data, you'll basically have two options:
- do a huge migration on literally all of your data to sanitize it; or
- forever sanitize all data on its way out on the read pathways.
As of 2011 Facebook is in the second group, and spends several milliseconds of
CPU time sanitizing every display string on its way to the browser, which
multiplies out to hundreds of servers worth of CPUs sitting in a datacenter
paying the price for the invalid UTF-8 in the databases.
You can likely learn enough about unicode to be confident in an implementation
which addresses this problem within a few hours. You don't need to learn
everything, just the basics. Your language probably already has a function which
does the sanitizing for you.
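In PHP, for example, a minimal write-path sanitizer might look like the sketch
below; pick whatever substitution behavior makes sense for your data:

  lang=php
  <?php

  // Sketch: validate on the way in; scrub only when validation fails.
  function sanitize_utf8($string) {
    if (mb_check_encoding($string, 'UTF-8')) {
      return $string;
    }
    // Converting UTF-8 to UTF-8 drops or replaces invalid byte sequences.
    return mb_convert_encoding($string, 'UTF-8', 'UTF-8');
  }

Call this on every string as it enters the application, not as it leaves.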
= Never Design a Blacklist-Based Security System =
When you have an alternative, don't design security systems which are default
permit, blacklist-based, or otherwise attempt to enumerate badness. When
Facebook launched Platform, it launched with a blacklist-based CSS filter, which
basically tried to enumerate all the "bad" parts of CSS and filter them out.
This was a poor design choice and led to basically infinite security holes for
all time.
It is very difficult to enumerate badness in a complex system and badness is
often a moving target. Instead of trying to do this, design whitelist-based
security systems where you list allowed things and reject anything you don't
understand. Assume things are bad until you verify that they're OK.
It's tempting to design blacklist-based systems because they're easier to write
and accept more inputs. In the case of the CSS filter, the product goal was for
users to just be able to use CSS normally and feel like this system was no
different from systems they were familiar with. A whitelist-based system would
reject some valid, safe inputs and create product friction.
But this is a much better world than the alternative, where the blacklist-based
system fails to reject some dangerous inputs and creates //security holes//. It
//also// creates product friction because when you fix those holes you break
existing uses, and that backward-compatibility friction makes it very difficult
to move the system from a blacklist to a whitelist. So you're basically in
trouble no matter what you do, and have a bunch of security holes you need to
unbreak immediately, so you won't even have time to feel sorry for yourself.
Designing blacklist-based security is one of the worst now-vs-future tradeoffs
you can make. See also "The Six Dumbest Ideas in Computer Security":
http://www.ranum.com/security/computer_security/
= Fail Very Loudly when SQL Syntax Errors Occur in Production =
This doesn't apply if you aren't using SQL, but if you are: detect when a query
fails because of a syntax error (in MySQL, it is error 1064). If the failure
happened in production, fail in the loudest way possible. (I implemented this in
2008 at Facebook and had it just email me and a few other people directly. The
system was eventually refined.)
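With `mysqli`, a sketch of the check might look like this; the
`is_production()` and `alert_oncall()` helpers are hypothetical placeholders
for whatever loud failure channel you have:

  lang=php
  <?php

  // Sketch: treat MySQL error 1064 (syntax error) in production as a likely
  // SQL injection hole and make a lot of noise about it.
  function execute_query(mysqli $conn, $sql) {
    $result = $conn->query($sql);
    if ($result === false) {
      if ($conn->errno === 1064 && is_production()) {
        alert_oncall('SQL syntax error in production: '.$conn->error);
      }
      throw new Exception('Query failed: '.$conn->error);
    }
    return $result;
  }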
This basically creates a high-signal stream that tells you where you have SQL
injection holes in your application. It will have some false positives and could
theoretically have false negatives, but at Facebook it was pretty high signal
considering how important the signal is.
Of course, the real solution here is to not have SQL injection holes in your
application, ever. As far as I'm aware, this system correctly detected the one
SQL injection hole we had from mid-2008 until I left in 2011, which was in a
hackathon project on an underisolated semi-production tier and didn't use the
query escaping system the rest of the application does.
Hopefully, whatever language you're writing in has good query libraries that
can handle escaping for you. If so, use them. If you're using PHP and don't have
-a solution in place yet, the Phabricator implementation of `qsprintf()` is
+a solution in place yet, the Phorge implementation of `qsprintf()` is
similar to Facebook's system and was successful there.
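The idea is that query text is always built through a formatting function
which escapes each value according to a typed conversion, so raw string
concatenation never happens. A usage sketch (see the `qsprintf()`
documentation for the exact set of conversions it supports):

  lang=php
  <?php

  // Sketch: %T is a table name, %s an escaped string, %d an integer.
  $query = qsprintf(
    $conn,
    'SELECT * FROM %T WHERE username = %s AND status = %d',
    'user',
    $username,
    $status);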
diff --git a/src/docs/flavor/writing_reviewable_code.diviner b/src/docs/flavor/writing_reviewable_code.diviner
index edff09f7d0..8cebc5d3e4 100644
--- a/src/docs/flavor/writing_reviewable_code.diviner
+++ b/src/docs/flavor/writing_reviewable_code.diviner
@@ -1,160 +1,160 @@
@title Writing Reviewable Code
@group review
Project recommendations on how to structure changes.
-This document is purely advisory. Phabricator works with a variety of revision
+This document is purely advisory. Phorge works with a variety of revision
control strategies, and diverging from the recommendations in this document
will not impact your ability to use it for code review and source management.
= Overview =
This document describes a strategy for structuring changes used successfully at
-Facebook and in Phabricator. In essence:
+Facebook and in Phorge. In essence:
- Each commit should be as small as possible, but no smaller.
- The smallest a commit can be is a single cohesive idea: don't make commits
so small that they are meaningless on their own.
- There should be a one-to-one mapping between ideas and commits:
each commit should build one idea, and each idea should be implemented by
one commit.
- Turn large commits into small commits by dividing large problems into
smaller problems and solving the small problems one at a time.
- Write sensible commit messages.
= Many Small Commits =
Small, simple commits are generally better than large, complex commits. They are
easier to understand, easier to test, and easier to review. The complexity of
understanding, testing and reviewing a change often increases faster than its
size: ten 200-line changes each doing one thing are often far easier to
understand than one 2,000 line change doing ten things. Splitting a change which
does many things into smaller changes which each do only one thing can decrease
the total complexity associated with accomplishing the same goal.
Each commit should do one thing. Generally, this means that you should separate
distinct changes into different commits when developing. For example, if you're
developing a feature and run into a preexisting bug, stash or checkpoint your
change, check out a clean HEAD/tip, fix the bug in one change, and then
merge/rebase your new feature on top of your bugfix so that you have two
changes, each with one idea ("add feature x", "fix a bug in y"), not one change
with two ideas ("add feature x and fix a bug in y").
(In Git, you can do this easily with local feature branches and commands like
`git rebase`, `git rebase -i`, and `git stash`, or with merges. In Mercurial,
you can use bookmarks or the queues extension. In SVN, there are few builtin
tools, but you can use multiple working copies or treat Differential like a
stash you access with `arc patch`.)
Even changes like fixing style problems should ideally be separated: they're
accomplishing a different goal. And it is far easier to review one 300-line
change which "converts tabs to spaces" plus one 30-line change which "implements
feature z" than one 330-line change which "implements feature z and also
converts a bunch of tabs to spaces".
Similarly, break related but complex changes into smaller, simpler components.
Here's a ridiculous analogy: if you're adding a new house, don't make one
5,000-line change which adds the whole house in one fell swoop. Split it apart
into smaller steps which are each easy to understand: start with the foundation,
then build the frame, etc. If you decided to dig the foundation with a shovel or
build the frame out of cardboard, it's both easier to miss and harder to correct
if the decisions are buried in 5,000 lines of interior design and landscaping.
Do it one piece at a time, providing enough context that the larger problem
can be understood but accomplishing no more with each step than you need to in
order for it to stand on its own.
The minimum size of a change should be a complete implementation of the simplest
subproblem which works on its own and expresses an entire idea, not just part
of an idea. You could mechanically split a 1,000-line change into ten 100-line
changes by choosing lines at random, but none of the individual changes would
make any sense and you would increase the collective complexity. The real goal
is for each change to have minimal complexity, line size is just a proxy that is
often well-correlated with complexity.
-We generally follow these practices in Phabricator. The median change size for
-Phabricator is 35 lines.
+We generally follow these practices in Phorge. The median change size for
+Phorge is 35 lines.
= Write Sensible Commit Messages =
There are lots of resources for this on the internet. All of them say pretty
much the same thing; this one does too.
The single most important thing is: **commit messages should explain //why// you
are making the change**.
Differential attempts to encourage the construction of sensible commit messages,
but can only enforce structure, not content. Structurally, commit messages
should probably:
- Have a title, briefly describing the change in one line.
- Have a summary, describing the change in more detail.
- Maybe have some other fields.
The content is far more important than the structure. In particular, the summary
should explain //why// you're making the change and //why// you're choosing the
implementation you're choosing. The //what// of the change is generally
well-explained by the change itself. For example, this is obviously an awful
commit message:
  COUNTEREXAMPLE
  fix a bug
But this one is almost as bad:
  COUNTEREXAMPLE
  Allow dots in usernames
  Change the regexps so usernames can have dots in them.
This is better than nothing but just summarizes information which can be
inferred from the text of the diff. Instead, you should provide context and
explain why you're making the change you're making, and why it's the right one:
  lang=txt
  Allow dots in usernames to support Google and LDAP auth
  To prevent nonsense, usernames are currently restricted to A-Z0-9. Now that
  we have Google and LDAP auth, a couple of installs want to allow "." too
  since they have schemes like "abraham.lincoln@mycompany.com" (see Tnnn). There
  are no technical reasons not to do this, so I opened up the regexps a bit.
  We could mostly open this up more but I figured I'd wait until someone asks
  before allowing "ke$ha", etc., because I personally find such names
  distasteful and offensive.
This information can not be extracted from the change itself, and is much more
useful for the reviewer and for anyone trying to understand the change after the
fact.
An easy way to explain //why// is to reference other objects
(bugs/issues/revisions) which motivate the change.
Differential also includes a "Test Plan" field which is required by default.
There is a detailed description of this field in @{article:Differential User
Guide: Test Plans}. You can make it optional or disable it in the configuration,
but consider adopting it. Having this information can be particularly helpful
for reviewers.
Some things that people sometimes feel strongly about but which are probably not
really all that important in commit messages include:
- If/where text is wrapped.
- Maximum length of the title.
- Whether there should be a period or not in the title.
- Use of voice/tense, e.g. "fix"/"add" vs "fixes"/"adds".
- Other sorts of pedantry not related to getting the context and
reasons //why// a change is happening into the commit message.
- Although maybe the spelling and grammar shouldn't be egregiously bad?
-Phabricator does not have guidelines for this stuff. You can obviously set
+Phorge does not have guidelines for this stuff. You can obviously set
guidelines at your organization if you prefer, but getting the //why// into the
message is the most important part.
= Next Steps =
Continue by:
- reading recommendations on structuring revision control with
@{article:Recommendations on Revision Control}; or
- reading recommendations on structuring branches with
@{article:Recommendations on Branching}.
diff --git a/src/docs/user/cluster/cluster.diviner b/src/docs/user/cluster/cluster.diviner
index 2362b80485..10f8e98e4c 100644
--- a/src/docs/user/cluster/cluster.diviner
+++ b/src/docs/user/cluster/cluster.diviner
@@ -1,342 +1,342 @@
@title Clustering Introduction
@group cluster
-Guide to configuring Phabricator across multiple hosts for availability and
+Guide to configuring Phorge across multiple hosts for availability and
performance.
Overview
========
WARNING: This feature is a prototype. Installs should expect a challenging
adventure when deploying clusters. In the best of times, configuring a
cluster is complex and requires significant operations experience.
-Phabricator can be configured to run on multiple hosts with redundant services
+Phorge can be configured to run on multiple hosts with redundant services
to improve its availability and scalability, and make disaster recovery much
easier.
Clustering is more complex to setup and maintain than running everything on a
single host, but greatly reduces the cost of recovering from hardware and
network failures.
-Each Phabricator service has an array of clustering options that can be
+Each Phorge service has an array of clustering options that can be
configured somewhat independently. Configuring a cluster is inherently complex,
and this is an advanced feature aimed at installs with large userbases and
experienced operations personnel who need this high degree of flexibility.
The remainder of this document summarizes how to add redundancy to each
service and where your efforts are likely to have the greatest impact.
For additional guidance on setting up a cluster, see "Overlaying Services"
and "Cluster Recipes" at the bottom of this document.
Clusterable Services
====================
This table provides an overview of clusterable services, their setup
complexity, and the rough impact that converting them to run on multiple hosts
will have on availability, resistance to data loss, and scalability.
| Service | Setup | Availability | Loss Resistance | Scalability
|---------|-------|--------------|-----------|------------
| **Databases** | Moderate | **High** | **High** | Low
| **Repositories** | Complex | Moderate | **High** | Moderate
| **Daemons** | Minimal | Low | No Risk | Low
| **SSH Servers** | Minimal | Low | No Risk | Low
| **Web Servers** | Minimal | **High** | No Risk | Moderate
| **Notifications** | Minimal | Low | No Risk | Low
| **Fulltext Search** | Minimal | Low | No Risk | Low
See below for a walkthrough of these services in greater detail.
Preparing for Clustering
========================
-To begin deploying Phabricator in cluster mode, set up `cluster.addresses`
+To begin deploying Phorge in cluster mode, set up `cluster.addresses`
in your configuration.
This option should contain a list of network address blocks which are considered
to be part of the cluster. Hosts in this list are allowed to bend (or even
break) some of the security and policy rules when they make requests to other
hosts in the cluster, so this list should be as small as possible. See "Cluster
Whitelist Security" below for discussion.
If you are deploying hardware in EC2, a reasonable approach is to launch a
-dedicated Phabricator VPC, whitelist the whole VPC as a Phabricator cluster,
-and then deploy only Phabricator services into that VPC.
+dedicated Phorge VPC, whitelist the whole VPC as a Phorge cluster,
+and then deploy only Phorge services into that VPC.
If you have additional auxiliary hosts which run builds and tests via Drydock,
you should //not// include them in the cluster address definition. For more
detailed discussion of the Drydock security model, see
@{article:Drydock User Guide: Security}.
Most other clustering features will not work until you define a cluster by
configuring `cluster.addresses`.
Cluster Whitelist Security
========================
When you configure `cluster.addresses`, you should keep the list of trusted
cluster hosts as small as possible. Hosts on this list gain additional
capabilities, including these:
-**Trusted HTTP Headers**: Normally, Phabricator distrusts the load balancer
+**Trusted HTTP Headers**: Normally, Phorge distrusts the load balancer
HTTP headers `X-Forwarded-For` and `X-Forwarded-Proto` because they may be
client-controlled and can be set to arbitrary values by an attacker if no load
balancer is deployed. In particular, clients can set `X-Forwarded-For` to any
value and spoof traffic from arbitrary remotes.
These headers are trusted when they are received from a host on the cluster
address whitelist. This allows requests from cluster loadbalancers to be
interpreted correctly by default without requiring additional custom code or
configuration.
**Intracluster HTTP**: Requests from cluster hosts are not required to use
HTTPS, even if `security.require-https` is enabled, because it is common to
terminate HTTPS on load balancers and use plain HTTP for requests within a
cluster.
**Special Authentication Mechanisms**: Cluster hosts are allowed to connect to
other cluster hosts with "root credentials", and to impersonate any user
account.
The use of root credentials is required because the daemons must be able to
bypass policies in order to function properly: they need to send mail about
private conversations and import commits in private repositories.
The ability to impersonate users is required because SSH nodes must receive,
interpret, modify, and forward SSH traffic. They can not use the original
credentials to do this because SSH authentication is asymmetric and they do not
have the user's private key. Instead, they use root credentials and impersonate
the user within the cluster.
These mechanisms are still authenticated (and use asymmetric keys, like SSH
does), so access to a host in the cluster address block does not mean that an
attacker can immediately compromise the cluster. However, an over-broad cluster
address whitelist may give an attacker who gains some access additional tools
to escalate access.
Note that if an attacker gains access to an actual cluster host, these extra
powers are largely moot. Most cluster hosts must be able to connect to the
master database to function properly, so the attacker will just do that and
freely read or modify whatever data they want.
Cluster: Databases
=================
Configuring multiple database hosts is moderately complex, but normally has the
highest impact on availability and resistance to data loss. This is usually the
most important service to make redundant if your focus is on availability and
disaster recovery.
-Configuring replicas allows Phabricator to run in read-only mode if you lose
+Configuring replicas allows Phorge to run in read-only mode if you lose
the master and to quickly promote the replica as a replacement.
For details, see @{article:Cluster: Databases}.
Cluster: Repositories
=====================
Configuring multiple repository hosts is complex, but is required before you
can add multiple daemon or web hosts.
Repository replicas are important for availability if you host repositories
-on Phabricator, but less important if you host repositories elsewhere
+on Phorge, but less important if you host repositories elsewhere
(instead, you should focus on making that service more available).
The distributed nature of Git and Mercurial tend to mean that they are
naturally somewhat resistant to data loss: every clone of a repository includes
the entire history.
Repositories may become a scalability bottleneck, although this is rare unless
your install has an unusually heavy repository read volume. Slow clones/fetches
may hint at a repository capacity problem. Adding more repository hosts will
provide an approximately linear increase in capacity.
For details, see @{article:Cluster: Repositories}.
Cluster: Daemons
================
Configuring multiple daemon hosts is straightforward, but you must configure
repositories first.
With daemons running on multiple hosts you can transparently survive the loss
of any subset of hosts without an interruption to daemon services, as long as
at least one host remains alive. Daemons are stateless, so spreading daemons
across multiple hosts provides no resistance to data loss.
Daemons can become a bottleneck, particularly if your install sees a large
volume of write traffic to repositories. If the daemon task queue has a
backlog, that hints at a capacity problem. If existing hosts have unused
resources, increase `phd.taskmasters` until they are fully utilized. From
there, adding more daemon hosts will provide an approximately linear increase
in capacity.
For details, see @{article:Cluster: Daemons}.
Cluster: SSH Servers
====================
Configuring multiple SSH hosts is straightforward, but you must configure
repositories first.
With multiple SSH hosts you can transparently survive the loss of any subset
of hosts without interruption to repository services, as long as at least one
host remains alive. SSH services are stateless, so putting multiple hosts in
service provides no resistance to data loss because no data is at risk.
SSH hosts are very rarely a scalability bottleneck.
For details, see @{article:Cluster: SSH Servers}.
Cluster: Web Servers
====================
Configuring multiple web hosts is straightforward, but you must configure
repositories first.
With multiple web hosts you can transparently survive the loss of any subset
of hosts as long as at least one host remains alive. Web services are stateless,
so putting multiple hosts in service provides no resistance to data loss
because no data is at risk.
Web hosts can become a bottleneck, particularly if you have a workload that is
heavily focused on reads from the web UI (like a public install with many
anonymous users). Slow responses to web requests may hint at a web capacity
problem. Adding more hosts will provide an approximately linear increase in
capacity.
For details, see @{article:Cluster: Web Servers}.
Cluster: Notifications
======================
Configuring multiple notification hosts is simple and has no pre-requisites.
With multiple notification hosts, you can survive the loss of any subset of
hosts as long as at least one host remains alive. Service may be briefly
disrupted directly after the incident which destroys the other hosts.
Notifications are noncritical, so this normally has little practical impact
on service availability. Notifications are also stateless, so clustering this
service provides no resistance to data loss because no data is at risk.
Notification delivery normally requires very few resources, so adding more
hosts is unlikely to have much impact on scalability.
For details, see @{article:Cluster: Notifications}.
Cluster: Fulltext Search
========================
Configuring search services is relatively simple and has no pre-requisites.
-By default, Phabricator uses MySQL as a fulltext search engine, so deploying
+By default, Phorge uses MySQL as a fulltext search engine, so deploying
multiple database hosts will effectively also deploy multiple fulltext search
hosts.
Search indexes can be completely rebuilt from the database, so there is no
risk of data loss no matter how fulltext search is configured.
For details, see @{article:Cluster: Search}.
Overlaying Services
===================
Although hosts can run a single dedicated service type, certain groups of
-services work well together. Phabricator clusters usually do not need to be
+services work well together. Phorge clusters usually do not need to be
very large, so deploying a small number of hosts with multiple services is a
good place to start.
In planning a cluster, consider these blended host types:
**Everything**: Run HTTP, SSH, MySQL, notifications, repositories and daemons
on a single host. This is the starting point for single-node setups, and
usually also the best configuration when adding the second node.
**Everything Except Databases**: Run HTTP, SSH, notifications, repositories and
daemons on one host, and MySQL on a different host. MySQL uses many of the same
resources that other services use. It's also simpler to separate than other
services, and tends to benefit the most from dedicated hardware.
**Repositories and Daemons**: Run repositories and daemons on the same host.
Repository hosts //must// run daemons, and it normally makes sense to
completely overlay repositories and daemons. These services tend to use
different resources (repositories are heavier on I/O and lighter on CPU/RAM;
daemons are heavier on CPU/RAM and lighter on I/O).
Repositories and daemons are also both less latency sensitive than other
service types, so there's a wider margin of error for under provisioning them
before performance is noticeably affected.
These nodes tend to use system resources in a balanced way. Individual nodes
in this class do not need to be particularly powerful.
**Frontend Servers**: Run HTTP and SSH on the same host. These are easy to set
up, stateless, and you can scale the pool up or down easily to meet demand.
Routing both types of ingress traffic through the same initial tier can
simplify load balancing.
These nodes tend to need relatively little RAM.
Cluster Recipes
===============
This section provides some guidance on reasonable ways to scale up a cluster.
The smallest possible cluster is **two hosts**. Run everything (web, ssh,
database, notifications, repositories, and daemons) on each host. One host will
serve as the master; the other will serve as a replica.
Ideally, you should physically separate these hosts to reduce the chance that a
natural disaster or infrastructure disruption could disable or destroy both
hosts at the same time.
From here, you can choose how you expand the cluster.
To improve **scalability and performance**, separate loaded services onto
dedicated hosts and then add more hosts of that type to increase capacity. If
you have a two-node cluster, the best way to improve scalability by adding one
host is likely to separate the master database onto its own host.
Note that increasing scale may //decrease// availability by leaving you with
too little capacity after a failure. If you have three hosts handling traffic
and one datacenter fails, too much traffic may be sent to the single remaining
host in the surviving datacenter. You can hedge against this by mirroring new
hosts in other datacenters (for example, also separate the replica database
onto its own host).
After separating databases, separating repository + daemon nodes is likely
the next step to consider.
To improve **availability**, add another copy of everything you run in one
datacenter to a new datacenter. For example, if you have a two-node cluster,
the best way to improve availability is to run everything on a third host in a
third datacenter. If you have a 6-node cluster with a web node, a database node
and a repo + daemon node in two datacenters, add 3 more nodes to create a copy
of each node in a third datacenter.
You can continue adding hosts until you run out of hosts.
Next Steps
==========
Continue by:
- learning how Phacility configures and operates a large, multi-tenant
production cluster in ((cluster)).
diff --git a/src/docs/user/cluster/cluster_daemons.diviner b/src/docs/user/cluster/cluster_daemons.diviner
index 8cde3e7b7d..53495280ff 100644
--- a/src/docs/user/cluster/cluster_daemons.diviner
+++ b/src/docs/user/cluster/cluster_daemons.diviner
@@ -1,57 +1,57 @@
@title Cluster: Daemons
@group cluster
-Configuring Phabricator to use multiple daemon hosts.
+Configuring Phorge to use multiple daemon hosts.
Overview
========
You can run daemons on multiple hosts. The advantages of doing this are:
- you can completely survive the loss of multiple daemon hosts; and
- worker queue throughput may improve.
This configuration is simple, but you must configure repositories first. For
details, see @{article:Cluster: Repositories}.
Since repository hosts must run daemons anyway, you usually do not need to do
any additional work and can skip this entirely if you have already configured
multiple repository hosts.
Adding Daemon Hosts
===================
After configuring repositories for clustering, launch daemons on every
repository host according to the documentation in
@{article:Cluster: Repositories}. These daemons are necessary: repositories
will not fetch, update, or synchronize properly without them.
If your repository clustering is redundant (you have at least two repository
hosts), these daemons are also likely to be sufficient in most cases. If you
want to launch additional hosts anyway (for example, to increase queue capacity
for unusual workloads), see "Dedicated Daemon Hosts" below.
Dedicated Daemon Hosts
======================
You can launch additional daemon hosts without any special configuration.
Daemon hosts must be able to reach other hosts on the network, but do not need
-to run any services (like HTTP or SSH). Simply deploy the Phabricator software
+to run any services (like HTTP or SSH). Simply deploy the Phorge software
and configuration and start the daemons.
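Concretely, once the software and configuration are in place, starting the
daemons on the new host is just the usual command (shown here assuming the
standard install layout):
```
phorge/ $ ./bin/phd start
```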
Normally, there is little reason to deploy dedicated daemon hosts. They can
improve queue capacity, but generally do not improve availability or increase
resistance to data loss on their own. Instead, consider deploying more
repository hosts: repository hosts run daemons, so this will increase queue
capacity but also improve repository availability and cluster resistance.
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}; or
- configuring repositories first with @{article:Cluster: Repositories}.
diff --git a/src/docs/user/cluster/cluster_databases.diviner b/src/docs/user/cluster/cluster_databases.diviner
index 51bf14d925..c13cc2eba4 100644
--- a/src/docs/user/cluster/cluster_databases.diviner
+++ b/src/docs/user/cluster/cluster_databases.diviner
@@ -1,409 +1,409 @@
@title Cluster: Databases
@group cluster
-Configuring Phabricator to use multiple database hosts.
+Configuring Phorge to use multiple database hosts.
Overview
========
-You can deploy Phabricator with multiple database hosts, configured as a master
+You can deploy Phorge with multiple database hosts, configured as a master
and a set of replicas. The advantages of doing this are:
- faster recovery from disasters by promoting a replica;
- graceful degradation if the master fails; and
- some tools to help monitor and manage replica health.
This configuration is complex, and many installs do not need to pursue it.
-If you lose the master, Phabricator can degrade automatically into read-only
+If you lose the master, Phorge can degrade automatically into read-only
mode and remain available, but can not fully recover without operational
intervention unless the master recovers on its own.
-Phabricator will not currently send read traffic to replicas unless the master
+Phorge will not currently send read traffic to replicas unless the master
has failed, so configuring a replica will not currently spread any load away
-from the master. Future versions of Phabricator are expected to be able to
+from the master. Future versions of Phorge are expected to be able to
distribute some read traffic to replicas.
-Phabricator can not currently be configured into a multi-master mode, nor can
+Phorge can not currently be configured into a multi-master mode, nor can
it be configured to automatically promote a replica to become the new master.
There are no current plans to support multi-master mode or autonomous failover,
although this may change in the future.
-Phabricator applications //can// be partitioned across multiple database
+Phorge applications //can// be partitioned across multiple database
masters. This does not provide redundancy and generally does not increase
resilience or resistance to data loss, but can help you scale and operate
-Phabricator. For details, see
+Phorge. For details, see
@{article:Cluster: Partitioning and Advanced Configuration}.
Setting up MySQL Replication
============================
To begin, set up a replica database server and configure MySQL replication.
If you aren't sure how to do this, refer to the MySQL manual for instructions.
The MySQL documentation is comprehensive and walks through the steps and
options in good detail. You should understand MySQL replication before
-deploying it in production: Phabricator layers on top of it, and does not
+deploying it in production: Phorge layers on top of it, and does not
attempt to abstract it away.
-Some useful notes for configuring replication for Phabricator:
+Some useful notes for configuring replication for Phorge:
-**Binlog Format**: Phabricator issues some queries which MySQL will detect as
+**Binlog Format**: Phorge issues some queries which MySQL will detect as
unsafe if you use the `STATEMENT` binlog format (the default). Instead, use
`MIXED` (recommended) or `ROW` as the `binlog_format`.
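For example, if you manage the server through a `my.cnf`-style configuration
file, a minimal fragment for this might look like:
```lang=ini, name="my.cnf (Binlog Format)"
[mysqld]
# Log replication-unsafe statements row-based, everything else as statements.
binlog_format = MIXED
```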
-**Grant `REPLICATION CLIENT` Privilege**: If you give the user that Phabricator
+**Grant `REPLICATION CLIENT` Privilege**: If you give the user that Phorge
will use to connect to the replica database server the `REPLICATION CLIENT`
-privilege, Phabricator's status console can give you more information about
+privilege, Phorge's status console can give you more information about
replica health and state.
-**Copying Data to Replicas**: Phabricator currently uses a mixture of MyISAM
+**Copying Data to Replicas**: Phorge currently uses a mixture of MyISAM
and InnoDB tables, so it can be difficult to guarantee that a dump is wholly
consistent and suitable for loading into a replica because MySQL uses different
consistency mechanisms for the different storage engines.
An approach you may want to consider to limit downtime but still produce a
-consistent dump is to leave Phabricator running but configured in read-only
+consistent dump is to leave Phorge running but configured in read-only
mode while dumping:
- Stop all the daemons.
- Set `cluster.read-only` to `true` and deploy the new configuration. The
- web UI should now show that Phabricator is in "Read Only" mode.
+ web UI should now show that Phorge is in "Read Only" mode.
- Dump the database. You can do this with `bin/storage dump --for-replica`
to add the `--master-data` flag to the underlying command and include a
`CHANGE MASTER ...` statement in the dump.
- Once the dump finishes, turn `cluster.read-only` off again to restore
service. Continue loading the dump into the replica normally.
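As a rough sketch, the steps above might look like this from the command line
(this assumes you use `bin/config` to deploy configuration; the `--compress`
and `--output` flags and the output path are just illustrative conveniences):
```
phorge/ $ ./bin/phd stop
phorge/ $ ./bin/config set cluster.read-only true
phorge/ $ ./bin/storage dump --for-replica --compress --output /backups/replica-seed.sql.gz
phorge/ $ ./bin/config set cluster.read-only false
phorge/ $ ./bin/phd start
```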
**Log Expiration**: You can configure MySQL to automatically clean up old
binary logs on startup with the `expire_logs_days` option. If you do not
configure this and do not explicitly purge old logs with `PURGE BINARY LOGS`,
the binary logs on disk will grow unboundedly and relatively quickly.
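For example, to keep roughly one week of binary logs (the exact retention
period is illustrative; choose one that fits your backup and replication
needs):
```lang=ini, name="my.cnf (Log Expiration)"
[mysqld]
# Purge binary logs older than 7 days.
expire_logs_days = 7
```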
-Once you have a working replica, continue below to tell Phabricator about it.
+Once you have a working replica, continue below to tell Phorge about it.
Configuring Replicas
====================
-Once your replicas are in working order, tell Phabricator about them by
+Once your replicas are in working order, tell Phorge about them by
configuring the `cluster.databases` option. This option must be configured from
-the command line or in configuration files because Phabricator needs to read
+the command line or in configuration files because Phorge needs to read
it //before// it can connect to databases.
-This option value will list all of the database hosts that you want Phabricator
+This option value will list all of the database hosts that you want Phorge
to interact with: your master and all your replicas. Each entry in the list
should have these keys:
- `host`: //Required string.// The database host name.
- `role`: //Required string.// The cluster role of this host, one of
`master` or `replica`.
- `port`: //Optional int.// The port to connect to. If omitted, the default
port from `mysql.port` will be used.
- `user`: //Optional string.// The MySQL username to use to connect to this
host. If omitted, the default from `mysql.user` will be used.
- `pass`: //Optional string.// The password to use to connect to this host.
If omitted, the default from `mysql.pass` will be used.
- - `disabled`: //Optional bool.// If set to `true`, Phabricator will not
+ - `disabled`: //Optional bool.// If set to `true`, Phorge will not
connect to this host. You can use this to temporarily take a host out
of service.
When `cluster.databases` is configured the `mysql.host` option is not used.
The other MySQL connection configuration options (`mysql.port`, `mysql.user`,
`mysql.pass`) are used only to provide defaults.
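For example, a minimal configuration with one master and one replica
(hostnames are illustrative) might look like this:
```lang=json, name="cluster.databases (Example)"
[
  {
    "host": "db001.mycompany.com",
    "role": "master",
    "port": 3306
  },
  {
    "host": "db002.mycompany.com",
    "role": "replica",
    "port": 3306
  }
]
```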
-Once you've configured this option, restart Phabricator for the changes to take
+Once you've configured this option, restart Phorge for the changes to take
effect, then continue to "Monitoring Replicas" to verify the configuration.
Monitoring Replicas
===================
You can monitor replicas in {nav Config > Database Servers}. This interface
shows you a quick overview of replicas and their health, and can detect some
common issues with replication.
The table on this page shows each database and current status.
NOTE: This page runs its diagnostics //from the web server that is serving the
request//. If you are recovering from a disaster, the view this page shows
may be partial or misleading, and two requests served by different servers may
see different views of the cluster.
-**Connection**: Phabricator tries to connect to each configured database, then
+**Connection**: Phorge tries to connect to each configured database, then
shows the result in this column. If it fails, a brief diagnostic message with
details about the error is shown. If it succeeds, the column shows a rough
measurement of latency from the current webserver to the database.
**Replication**: This is a summary of replication status on the database. If
things are properly configured and stable, the replicas should be actively
replicating and no more than a few seconds behind master, and the master
should //not// be replicating from another database.
-To report this status, the user Phabricator is connecting as must have the
+To report this status, the user Phorge is connecting as must have the
`REPLICATION CLIENT` privilege (or the `SUPER` privilege) so it can run the
`SHOW SLAVE STATUS` command. The `REPLICATION CLIENT` privilege only enables
the user to run diagnostic commands so it should be reasonable to grant it in
most cases, but it is not required. If you choose not to grant it, this page
can not show any useful diagnostic information about replication status but
everything else will still work.
If a replica is more than a second behind master, this page will show the
current replication delay. If the replication delay is more than 30 seconds,
it will report "Slow Replication" with a warning icon.
If replication is delayed, data is at risk: if you lose the master and can not
later recover it (for example, because a meteor has obliterated the datacenter
housing the physical host), data which did not make it to the replica will be
lost forever.
Beyond the risk of data loss, any read-only traffic sent to the replica will
see an older view of the world which could be confusing for users: it may
appear that their data has been lost, even if it is safe and just hasn't
replicated yet.
-Phabricator will attempt to prevent clients from seeing out-of-date views, but
+Phorge will attempt to prevent clients from seeing out-of-date views, but
sometimes sending traffic to a delayed replica is the best available option
(for example, if the master can not be reached).
**Health**: This column shows the result of recent health checks against the
-server. After several checks in a row fail, Phabricator will mark the server
+server. After several checks in a row fail, Phorge will mark the server
as unhealthy and stop sending traffic to it until several checks in a row
later succeed.
Note that each web server tracks database health independently, so if you have
several servers they may have different views of database health. This is
normal and not problematic.
For more information on health checks, see "Unreachable Masters" below.
**Messages**: This column has additional details about any errors shown in the
other columns. These messages can help you understand or resolve problems.
Testing Replicas
================
To test that your configuration can survive a disaster, turn off the master
database. Do this with great ceremony, making a cool explosion sound as you
run the `mysqld stop` command.
-If things have been set up properly, Phabricator should degrade to a temporary
+If things have been set up properly, Phorge should degrade to a temporary
read-only mode immediately. After a brief period of unresponsiveness, it will
degrade further into a longer-term read-only mode. For details on how this
works internally, see "Unreachable Masters" below.
-Once satisfied, turn the master back on. After a brief delay, Phabricator
+Once satisfied, turn the master back on. After a brief delay, Phorge
should recognize that the master is healthy again and recover fully.
Throughout this process, the {nav Database Servers} console will show a
current view of the world from the perspective of the web server handling the
request. You can use it to monitor state.
You can perform a more narrow test by enabling `cluster.read-only` in
-configuration. This will put Phabricator into read-only mode immediately
+configuration. This will put Phorge into read-only mode immediately
without turning off any databases.
You can use this mode to understand which capabilities will and will not be
available in read-only mode, and make sure any information you want to remain
accessible in a disaster (like wiki pages or contact information) is really
accessible.
See the next section, "Degradation to Read Only Mode", for more details about
-when, why, and how Phabricator degrades.
+when, why, and how Phorge degrades.
If you run custom code or extensions, they may not accommodate read-only mode
properly. You should specifically test that they function correctly in
read-only mode and do not prevent you from accessing important information.
Degradation to Read-Only Mode
=============================
-Phabricator will degrade to read-only mode when any of these conditions occur:
+Phorge will degrade to read-only mode when any of these conditions occur:
- you turn it on explicitly;
- you configure cluster mode, but don't set up any masters;
- the master can not be reached while handling a request; or
- recent attempts to connect to the master have consistently failed.
-When Phabricator is running in read-only mode, users can still read data and
+When Phorge is running in read-only mode, users can still read data and
browse and clone repositories, but they can not edit, update, or push new
changes. For example, users can still read disaster recovery information on
the wiki or emergency contact information on user profiles.
You can enable this mode explicitly by configuring `cluster.read-only`. Some
reasons you might want to do this include:
- to test that the mode works like you expect it to;
- to make sure that information you need will be available;
- to prevent new writes while performing database maintenance; or
- - to permanently archive a Phabricator install.
+ - to permanently archive a Phorge install.
You can also enable this mode implicitly by configuring `cluster.databases`
but disabling the master, or by not specifying any host as a master. This may
be more convenient than turning it on explicitly during the course of
operations work.
-If Phabricator is unable to reach the master database, it will degrade into
+If Phorge is unable to reach the master database, it will degrade into
read-only mode automatically. See "Unreachable Masters" below for details on
how this process works.
If you end up in a situation where you have lost the master and can not get it
back online (or can not restore it quickly) you can promote a replica to become
the new master. See the next section, "Promoting a Replica", for details.
Promoting a Replica
===================
-If you lose access to the master database, Phabricator will degrade into
+If you lose access to the master database, Phorge will degrade into
read-only mode. This is described in greater detail below.
The easiest way to get out of read-only mode is to restore the master database.
If the database recovers on its own or operations staff can revive it,
-Phabricator will return to full working order after a few moments.
+Phorge will return to full working order after a few moments.
If you can't restore the master or are unsure you will be able to restore the
master quickly, you can promote a replica to become the new master instead.
Before doing this, you should first assess how far behind the master the
replica was when the link died. Any data which was not replicated will either
be lost or become very difficult to recover after you promote a replica.
For example, if a task `T1234` had been created on the master but had not yet
replicated and you promote the replica, a new `T1234` may be created on the
replica after promotion. Even if you can recover the master later, merging
the data will be difficult because each database may have conflicting changes
which can not be merged easily.
If there was a significant replication delay at the time of the failure, you
may want to try harder or spend more time attempting to recover the master
before choosing to promote.
If you have made a choice to promote, disable replication on the replica and
mark it as the `master` in `cluster.databases`. Remove the original master and
deploy the configuration change to all surviving hosts.
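Continuing the illustrative two-host example from "Configuring Replicas"
above, the configuration after promotion might look like this (after you have
stopped replication on `db002` using your normal MySQL tooling):
```lang=json, name="cluster.databases (After Promotion)"
[
  {
    "host": "db002.mycompany.com",
    "role": "master",
    "port": 3306
  }
]
```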
Once write service is restored, you should provision, deploy, and configure a
new replica by following the steps you took the first time around. You are
critically vulnerable to a second disruption until you have restored the
redundancy.
Unreachable Masters
===================
-This section describes how Phabricator determines that a master has been lost,
+This section describes how Phorge determines that a master has been lost,
marks it unreachable, and degrades into read-only mode.
-Phabricator degrades into read-only mode automatically in two ways: very
+Phorge degrades into read-only mode automatically in two ways: very
briefly in response to a single connection failure, or more permanently in
response to a series of connection failures.
In the first case, if a request needs to connect to the master but is not able
-to, Phabricator will temporarily degrade into read-only mode for the remainder
-of that request. The alternative is to fail abruptly, but Phabricator can
+to, Phorge will temporarily degrade into read-only mode for the remainder
+of that request. The alternative is to fail abruptly, but Phorge can
sometimes degrade successfully and still respond to the user's request, so it
makes an effort to finish serving the request from replicas.
If the request was a write (like posting a comment) it will fail anyway, but
if it was a read that did not actually need to use the master it may succeed.
This temporary mode is intended to recover as gracefully as possible from brief
interruptions in service (a few seconds), like a server being restarted, a
network link becoming temporarily unavailable, or brief periods of load-related
-disruption. If the anomaly is temporary, Phabricator should recover immediately
+disruption. If the anomaly is temporary, Phorge should recover immediately
(on the next request once service is restored).
This mode can be slow for users (they need to wait on connection attempts to
the master which fail) and does not reduce load on the master (requests still
attempt to connect to it).
-The second way Phabricator degrades is by running periodic health checks
+The second way Phorge degrades is by running periodic health checks
against databases, and marking them unhealthy if they fail over a longer period
of time. This mechanism is very similar to the health checks that most HTTP
load balancers perform against web servers.
-If a database fails several health checks in a row, Phabricator will mark it as
+If a database fails several health checks in a row, Phorge will mark it as
unhealthy and stop sending all traffic (except for more health checks) to it.
This improves performance during a service interruption and reduces load on the
master, which may help it recover from load problems.
You can monitor the status of health checks in the {nav Database Servers}
console. The "Health" column shows how many checks have run recently and
how many have succeeded.
Health checks run every 3 seconds, and 5 checks in a row must fail or succeed
-before Phabricator marks the database as healthy or unhealthy, so it will
+before Phorge marks the database as healthy or unhealthy, so it will
generally take about 15 seconds for a database to change state after it goes
down or comes up.
-If all of the recent checks fail, Phabricator will mark the database as
+If all of the recent checks fail, Phorge will mark the database as
unhealthy and stop sending traffic to it. If the master was the database that
-was marked as unhealthy, Phabricator will actively degrade into read-only mode
+was marked as unhealthy, Phorge will actively degrade into read-only mode
until it recovers.
This mode only attempts to connect to the unhealthy database once every few
seconds to see if it is recovering, so performance will be better on average
(users rarely need to wait for bad connections to fail or time out) and the
database will receive less load.
-Once all of the recent checks succeed, Phabricator will mark the database as
+Once all of the recent checks succeed, Phorge will mark the database as
healthy again and continue sending traffic to it.
Health checks are tracked individually for each web server, so some web servers
may see a host as healthy while others see it as unhealthy. This is normal, and
can accurately reflect the state of the world: for example, the link between
datacenters may have been lost, so hosts in one datacenter can no longer see
the master, while hosts in the other datacenter still have a healthy link to
it.
Backups
======
Even if you configure replication, you should still retain separate backup
snapshots. Replicas protect you from data loss if you lose a host, but they do
not let you recover from data mutation mistakes.
If something issues `DELETE` or `UPDATE` statements and destroys data on the
master, the mutation will propagate to the replicas almost immediately and the
data will be gone forever. Normally, the only way to recover this data is from
backup snapshots.
Although you should still have a backup process, your backup process can
safely pull dumps from a replica instead of the master. This operation can
be slow, so offloading it to a replica can make the performance of the master
more consistent.
To dump from a replica, you can use `bin/storage dump --host <host>` to
control which host the command connects to. (You may still want to execute
this command //from// that host, to avoid sending the whole dump over the
network).
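For example (the hostname and output path are illustrative):
```
phorge/ $ ./bin/storage dump --host db002.mycompany.com --compress --output /backups/daily.sql.gz
```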
With the `--for-replica` flag, the `bin/storage dump` command creates dumps
with `--master-data`, which includes a `CHANGE MASTER` statement in the output.
This may be helpful when initially setting up new replicas, as it can make it
easier to change the binlog coordinates to the correct position for the dump.
With recent versions of MySQL, it is also possible to configure a //delayed//
replica which intentionally lags behind the master (say, by 12 hours). In the
event of a bad mutation, this could give you a larger window of time to
recognize the issue and recover the lost data from the delayed replica (which
might be quick) without needing to restore backups (which might be very slow).
Delayed replication is outside the scope of this document, but may be worth
considering as an additional data security step on top of backup snapshots
depending on your resources and needs. If you configure a delayed replica, do
-not add it to the `cluster.databases` configuration: Phabricator should never
+not add it to the `cluster.databases` configuration: Phorge should never
send traffic to it, and does not need to know about it.
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/cluster/cluster_devices.diviner b/src/docs/user/cluster/cluster_devices.diviner
index c90aa220c4..af29978345 100644
--- a/src/docs/user/cluster/cluster_devices.diviner
+++ b/src/docs/user/cluster/cluster_devices.diviner
@@ -1,247 +1,247 @@
@title Cluster: Devices
@group cluster
Guide to configuring hosts to act as cluster devices.
Cluster Context
===============
-This document describes a step in configuring Phabricator to run on
+This document describes a step in configuring Phorge to run on
multiple hosts in a cluster configuration. This is an advanced feature. For
more information on clustering, see @{article:Clustering Introduction}.
In this context, device configuration is mostly relevant to configuring
repository services in a cluster. You can find more details about this in
@{article:Cluster: Repositories}.
Overview
========
Some cluster services need to be able to authenticate themselves and interact
with other services. For example, two repository hosts holding copies of the
same repository must be able to fetch changes from one another, even if the
repository is private.
Within a cluster, devices authenticate using SSH keys. Some operations happen
over SSH (using keys in a normal way, as you would when running `ssh` from the
command line), while others happen over HTTP (using SSH keys to sign requests).
Before hosts can authenticate to one another, you need to configure the
credentials so other devices know the keys can be trusted. Beyond establishing
trust, this configuration will establish //device identity//, so each host
knows which device it is explicitly.
Today, this is primarily necessary when configuring repository clusters.
Using Almanac
=============
-The tool Phabricator uses to manage cluster devices is the **Almanac**
+The tool Phorge uses to manage cluster devices is the **Almanac**
application, and most configuration will occur through the application's web
UI. If you are not familiar with it, see @{article:Almanac User Guide} first.
This document assumes you are familiar with Almanac concepts.
What Lies Ahead
===============
Here's a brief overview of the steps required to register cluster devices. The
remainder of this document walks through these points in more detail.
- Create an Almanac device record for each device.
- Generate, add, and trust SSH keys if necessary.
- - Install Phabricator on the host.
+ - Install Phorge on the host.
- Use `bin/almanac register` from the host to register it as a device.
See below for guidance on each of these steps.
Individual vs Shared Keys
=========================
Before getting started, you should choose how you plan to manage device SSH
keys. Trust and device identity are handled separately, and there are two ways
to set up SSH keys so that devices can authenticate with one another:
- you can generate a unique SSH key for each device; or
- you can generate one SSH key and share it across multiple devices.
Using **unique keys** allows the tools to do some more sanity/safety checks and
makes it a bit more difficult to misconfigure things, but you'll have to do
more work managing the actual keys. This may be a better choice if you are
setting up a small cluster (2-3 devices) for the first time.
Using **shared keys** makes key management easier but safety checks won't be
able to catch a few kinds of mistakes. This may be a better choice if you are
setting up a larger cluster, plan to expand the cluster later, or have
-experience with Phabricator clustering.
+experience with Phorge clustering.
Because all cluster keys are all-powerful, there is no material difference
between these methods from a security or trust viewpoint. Unique keys are just
potentially easier to administrate at small scales, while shared keys are
easier at larger scales.
Create Almanac Device Records
=============================
-For each host you plan to make part of a Phabricator cluster, go to the
+For each host you plan to make part of a Phorge cluster, go to the
{nav Almanac} application and create a **device** record. For guidance on this
application, see @{article:Almanac User Guide}.
-Add **interfaces** to each device record so Phabricator can tell how to
+Add **interfaces** to each device record so Phorge can tell how to
connect to these hosts. Normally, you'll add one HTTP interface (usually on
port 80) and one SSH interface (by default, on port 2222) to each device:
For example, if you are building a two-host repository cluster, you may end
up with records that look like these:
- Device: `repo001.mycompany.net`
- Interface: `123.0.0.1:2222`
- Interface: `123.0.0.1:80`
- Device: `repo002.mycompany.net`
- Interface: `123.0.0.2:2222`
- Interface: `123.0.0.2:80`
Note that these hosts will normally run two `sshd` ports: the standard `sshd`
which you connect to to operate and administrate the host, and the special
-Phabricator `sshd` that you connect to to clone and push repositories.
+Phorge `sshd` that you connect to to clone and push repositories.
-You should specify the Phabricator `sshd` port, **not** the standard `sshd`
+You should specify the Phorge `sshd` port, **not** the standard `sshd`
port.
If you're using **unique** SSH keys for each device, continue to the next step.
If you're using **shared** SSH keys, create a third device with no interfaces,
like `keywarden.mycompany.net`. This device will just be used as a container to
hold the trusted SSH key and is not a real device.
NOTE: Do **not** create a **service** record yet. Today, service records become
active immediately once they are created, and you haven't set things up yet.
Generate and Trust SSH Keys
===========================
Next, you need to generate or upload SSH keys and mark them as trusted. Marking
a key as trusted gives it tremendous power.
If you're using **unique** SSH keys, upload or generate a key for each
individual device from the device detail screen in the Almanac web UI. Save the
private keys for the next step.
If you're using a **shared** SSH key, upload or generate a single key for
the keywarden device from the device detail screen in the Almanac web UI.
Save the private key for the next step.
Regardless of how many keys you generated, take the key IDs from the tables
in the web UI and run this command from the command line for each key, to mark
each key as trusted:
```
-phabricator/ $ ./bin/almanac trust-key --id <key-id-1>
-phabricator/ $ ./bin/almanac trust-key --id <key-id-2>
+phorge/ $ ./bin/almanac trust-key --id <key-id-1>
+phorge/ $ ./bin/almanac trust-key --id <key-id-2>
...
```
The warnings this command emits are serious. The private keys are now trusted,
and allow any user or device possessing them to sign requests that bypass
policy checks without requiring additional credentials. Guard them carefully!
If you need to revoke trust for a key later, use `untrust-key`:
```
-phabricator/ $ ./bin/almanac untrust-key --id <key-id>
+phorge/ $ ./bin/almanac untrust-key --id <key-id>
```
Once the keys are trusted, continue to the next step.
-Install Phabricator
+Install Phorge
===================
-If you haven't already, install Phabricator on each device you plan to enroll
+If you haven't already, install Phorge on each device you plan to enroll
in the cluster. Cluster repository devices must provide services over both HTTP
and SSH, so you need to install and configure both a webserver and a
-Phabricator `sshd` on these hosts.
+Phorge `sshd` on these hosts.
Generally, you will follow whatever process you otherwise use when installing
-Phabricator.
+Phorge.
NOTE: Do not start the daemons on the new devices yet. They won't work properly
until you've finished configuring things.
-Once Phabricator is installed, you can enroll the devices in the cluster by
+Once Phorge is installed, you can enroll the devices in the cluster by
registering them.
Register Devices
================
To register a host as an Almanac device, use `bin/almanac register`.
If you are using **unique** keys, run it like this:
```
$ ./bin/almanac register \
--device <device> \
--private-key <key>
```
For example, you might run this command on `repo001` when using unique keys:
```
$ ./bin/almanac register \
--device repo001.mycompany.net \
--private-key /path/to/private.key
```
If you are using a **shared** key, this will be a little more complicated
because you need to override some checks that are intended to prevent mistakes.
Use the `--identify-as` flag to choose a device identity:
```
$ ./bin/almanac register \
--device <keywarden-device> \
--private-key <key> \
--identify-as <actual-device>
```
For example, you might run this command on `repo001` when using a shared key:
```
$ ./bin/almanac register \
--device keywarden.mycompany.net \
--private-key /path/to/private-key \
--identify-as repo001.mycompany.net
```
In particular, note that `--device` is always the **trusted** device associated
with the trusted key. The `--identify-as` flag allows several different hosts
to share the same key but still identify as different devices.
The overall effect of the `bin/almanac` command is to copy identity and key
-files into `phabricator/conf/keys/`. You can inspect the results by examining
+files into `phorge/conf/keys/`. You can inspect the results by examining
that directory. The helper script just catches potential mistakes and makes
sure the process is completed correctly.
Note that a copy of the active private key is stored in the `conf/keys/`
directory permanently.
When converting a host into a cluster host, you may need to revisit
@{article:Diffusion User Guide: Repository Hosting} and double check the `sudo`
permission for the host. In particular, cluster hosts need to be able to run
`ssh` via `sudo` so they can read the device private key.
Next Steps
==========
Now that devices are registered, you can build cluster services from them.
Return to the relevant cluster service documentation to continue:
- build repository clusters with @{article:Cluster: Repositories};
- return to @{article:Clustering Introduction}; or
- review the Almanac application with @{article:Almanac User Guide}.
diff --git a/src/docs/user/cluster/cluster_notifications.diviner b/src/docs/user/cluster/cluster_notifications.diviner
index 79c89769fc..2893aefa91 100644
--- a/src/docs/user/cluster/cluster_notifications.diviner
+++ b/src/docs/user/cluster/cluster_notifications.diviner
@@ -1,171 +1,171 @@
@title Cluster: Notifications
@group cluster
-Configuring Phabricator to use multiple notification servers.
+Configuring Phorge to use multiple notification servers.
Overview
========
You can run multiple notification servers. The advantages of doing this
are:
- you can completely survive the loss of any subset so long as one
remains standing; and
- performance and capacity may improve.
This configuration is relatively simple, but has a small impact on availability
and does nothing to increase resistance to data loss.
Clustering Design Goals
=======================
Notification clustering aims to restore service automatically after the loss
of some nodes. It does **not** attempt to guarantee that every message is
delivered.
Notification messages provide timely information about events, but they are
never authoritative and never the only way for users to learn about events.
For example, if a notification about a task update is not delivered, the next
page you load will still show the notification in your notification menu.
-Generally, Phabricator works fine without notifications configured at all, so
+Generally, Phorge works fine without notifications configured at all, so
clustering assumes that losing some messages during a disruption is acceptable.
How Clustering Works
====================
Notification clustering is very simple: notification servers relay every
message they receive to a list of peers.
When you configure clustering, you'll run multiple servers and tell them that
the other servers exist. When any server receives a message, it retransmits it
to all the servers it knows about.
When a server is lost, clients will automatically reconnect after a brief
delay. They may lose some notifications while their client is reconnecting,
but normally this should only last for a few seconds.
Configuring Aphlict
===================
To configure clustering on the server side, add a `cluster` key to your
Aphlict configuration file. For more details about configuring Aphlict,
see @{article:Notifications User Guide: Setup and Configuration}.
The `cluster` key should contain a list of `"admin"` server locations. Every
message the server receives will be retransmitted to all nodes in the list.
The server is smart enough to avoid sending messages in a cycle, and to avoid
sending messages to itself. You can safely list every server you run in the
configuration file, including the current server.
You do not need to configure servers in an acyclic graph or only list //other//
servers: just list everything on every server and Aphlict will figure things
out from there.
A simple example with two servers might look like this:
```lang=json, name="aphlict.json (Cluster)"
{
...
"cluster": [
{
"host": "notify001.mycompany.com",
"port": 22281,
"protocol": "http"
},
{
"host": "notify002.mycompany.com",
"port": 22281,
"protocol": "http"
}
]
...
}
```
-Configuring Phabricator
+Configuring Phorge
=======================
To configure clustering on the client side, add every service you run to
`notification.servers`. Generally, this will be twice as many entries as
you run actual servers, since each server runs a `"client"` service and an
`"admin"` service.
A simple example with the two servers above (providing four total services)
might look like this:
```lang=json, name="notification.servers (Cluster)"
[
{
"type": "client",
"host": "notify001.mycompany.com",
"port": 22280,
"protocol": "https"
},
{
"type": "client",
"host": "notify002.mycompany.com",
"port": 22280,
"protocol": "https"
},
{
"type": "admin",
"host": "notify001.mycompany.com",
"port": 22281,
"protocol": "http"
},
{
"type": "admin",
"host": "notify002.mycompany.com",
"port": 22281,
"protocol": "http"
}
]
```
If you put all of the `"client"` servers behind a load balancer, you would
just list the load balancer and let it handle pulling nodes in and out of
service.
```lang=json, name="notification.servers (Cluster + Load Balancer)"
[
{
"type": "client",
"host": "notify-lb.mycompany.com",
"port": 22280,
"protocol": "https"
},
{
"type": "admin",
"host": "notify001.mycompany.com",
"port": 22281,
"protocol": "http"
},
{
"type": "admin",
"host": "notify002.mycompany.com",
"port": 22281,
"protocol": "http"
}
]
```
Notification hosts do not need to run any additional services, although they
are free to do so. The notification server generally consumes few resources
and is resistant to most other loads on the machine, so it's reasonable to
overlay these on top of other services wherever it is convenient.
Next Steps
==========
Continue by:
- reviewing notification configuration with
@{article:Notifications User Guide: Setup and Configuration}; or
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/cluster/cluster_partitioning.diviner b/src/docs/user/cluster/cluster_partitioning.diviner
index 20ae11d6a6..cfac0d02ac 100644
--- a/src/docs/user/cluster/cluster_partitioning.diviner
+++ b/src/docs/user/cluster/cluster_partitioning.diviner
@@ -1,241 +1,241 @@
@title Cluster: Partitioning and Advanced Configuration
@group cluster
-Guide to partitioning Phabricator applications across multiple database hosts.
+Guide to partitioning Phorge applications across multiple database hosts.
Overview
========
-You can partition Phabricator's applications across multiple databases. For
+You can partition Phorge's applications across multiple databases. For
example, you can move an application like Files or Maniphest to a dedicated
database host.
The advantages of doing this are:
- moving heavily used applications to dedicated hardware can help you
scale; and
- you can match application workloads to hardware or configuration to make
operating the cluster easier.
This configuration is complex, and very few installs will benefit from pursuing
-it. Phabricator will normally run comfortably with a single database master
+it. Phorge will normally run comfortably with a single database master
even for large organizations.
Partitioning generally does not do much to increase resilience or make it
easier to recover from disasters, and is primarily a mechanism for scaling and
operational convenience.
If you are considering partitioning, you likely want to configure replication
with a single master first. Even if you choose not to deploy replication, you
should review and understand how replication works before you partition. For
details, see @{article:Cluster: Databases}.
Databases also support some advanced configuration options. Briefly:
- `persistent`: Allows use of persistent connections, reducing pressure on
outbound ports.
See "Advanced Configuration", below, for additional discussion.
What Partitioning Does
======================
-When you partition Phabricator, you move all of the data for one or more
+When you partition Phorge, you move all of the data for one or more
applications (like Maniphest) to a new master database host. This is possible
-because Phabricator stores data for each application in its own logical
-database (like `phabricator_maniphest`) and performs no joins between databases.
+because Phorge stores data for each application in its own logical
+database (like `phorge_maniphest`) and performs no joins between databases.
If you're running into scale limits on a single master database, you can move
one or more of your most commonly-used applications to a second database host
and continue adding users. You can keep partitioning applications until all
heavily used applications have dedicated database servers.
Alternatively or additionally, you can partition applications to make operating
the cluster easier. Some applications have unusual workloads or requirements,
and moving them to separate hosts may make things easier to deal with overall.
For example: if Files accounts for most of the data on your install, you might
move it to a different host to make backing up everything else easier.
Configuration Overview
======================
To configure partitioning, you will add multiple entries to `cluster.databases`
with the `master` role. Each `master` should specify a new `partition` key,
which contains a list of application databases it should host.
One master may be specified as the `default` partition. Applications not
explicitly configured to be assigned elsewhere will be assigned here.
When you define multiple `master` databases, you must also specify which master
each `replica` database follows. Here's a simple example config:
```lang=json
...
"cluster.databases": [
{
"host": "db001.corporation.com",
"role": "master",
- "user": "phabricator",
+ "user": "phorge",
"pass": "hunter2!trustno1",
"port": 3306,
"partition": [
"default"
]
},
{
"host": "db002.corporation.com",
"role": "replica",
- "user": "phabricator",
+ "user": "phorge",
"pass": "hunter2!trustno1",
"port": 3306,
"master": "db001.corporation.com:3306"
},
{
"host": "db003.corporation.com",
"role": "master",
- "user": "phabricator",
+ "user": "phorge",
"pass": "hunter2!trustno1",
"port": 3306,
"partition": [
"file",
"passphrase",
"slowvote"
]
},
{
"host": "db004.corporation.com",
"role": "replica",
- "user": "phabricator",
+ "user": "phorge",
"pass": "hunter2!trustno1",
"port": 3306,
"master": "db003.corporation.com:3306"
}
],
...
```
In this configuration, `db001` is a master and `db002` replicates it.
`db003` is a second master, replicated by `db004`.
Applications have been partitioned like this:
- `db003`/`db004`: Files, Passphrase, Slowvote
- `db001`/`db002`: Default (all other applications)
Not all of the database partition names are the same as the application
names. You can get a list of databases with `bin/storage databases` to identify
the correct database names.
After you have configured partitioning, it needs to be committed to the
databases. This writes a copy of the configuration to tables on the databases,
preventing errors if a webserver accidentally starts with an old or invalid
configuration.
To commit the configuration, run this command:
```
-phabricator/ $ ./bin/storage partition
+phorge/ $ ./bin/storage partition
```
Run this command after making any partition or clustering changes. Webservers
will not serve traffic if their configuration and the database configuration
differ.
Launching a new Partition
=========================
To add a new partition, follow these steps:
- Set up the new database host or hosts.
- Add the new database to `cluster.databases`, but keep its "partition"
configuration empty (just an empty list). If this is the first time you
are partitioning, you will need to configure your existing master as the
- new "default". This will let Phabricator interact with it, but won't send
+ new "default". This will let Phorge interact with it, but won't send
any traffic to it yet.
- Run `bin/storage partition`.
- Run `bin/storage upgrade` to initialize the schemata on the new hosts.
- - Stop writes to the applications you want to move by putting Phabricator
+ - Stop writes to the applications you want to move by putting Phorge
in read-only mode, or shutting down the webserver and daemons, or telling
everyone not to touch anything.
- Dump the data from the application databases on the old master.
- Load the data into the application databases on the new master.
- - Reconfigure the "partition" setup so that Phabricator knows the databases
+ - Reconfigure the "partition" setup so that Phorge knows the databases
have moved.
- Run `bin/storage partition`.
- While still in read-only mode, check that all the data appears to be
intact.
- Resume writes.
You can do this with a small, rarely-used application first (on most installs,
Slowvote might be a good candidate) if you want to run through the process
end-to-end before performing a larger, higher-stakes migration.
How Partitioning Works
======================
-If you have multiple masters, Phabricator keeps the entire set of schemata up
+If you have multiple masters, Phorge keeps the entire set of schemata up
to date on all of them. When you run `bin/storage upgrade` or other storage
management commands, they generally affect all masters (if they do not, they
will prompt you to be more specific).
When Phorge reads or writes normal data (for example, to query a list of
tasks), it connects only to the master that the relevant application is
assigned to.
In most cases, a master will not have any data in the databases which are not
assigned to it. If it does (for example, because it previously hosted the
application) the data is ignored. This approach (of maintaining all schemata on
all hosts) makes it easier to move data and to quickly revert changes if a
configuration mistake occurs.
There are some exceptions to this rule. For example, all masters keep track
of which patches have been applied to that particular master so that
`bin/storage upgrade` can upgrade hosts correctly.
-Phabricator does not perform joins across logical databases, so there are no
+Phorge does not perform joins across logical databases, so there are no
meaningful differences in runtime behavior if two applications are on the same
physical host or different physical hosts.
Advanced Configuration
======================
Separate from partitioning, some advanced configuration is supported. These
options must be set on database specifications in `cluster.databases`. You can
configure them without actually building a cluster by defining a cluster with
only one master.
`persistent` //(bool)// Enables persistent connections. Defaults to off.
-With persistent connections enabled, Phabricator will keep a pool of database
+With persistent connections enabled, Phorge will keep a pool of database
connections open between web requests and reuse them when serving subsequent
requests.
The primary benefit of using persistent connections is that it will greatly
reduce pressure on how quickly outbound TCP ports are opened and closed. After
a TCP port closes, it normally can't be used again for about 60 seconds, so
rapidly cycling ports can cause resource exhaustion. If you're seeing failures
because requests are unable to bind to an outbound port, enabling this option
is likely to fix the issue. This option may also slightly increase performance.
The cost of using persistent connections is that you may need to raise the
-MySQL `max_connections` setting: although Phabricator will make far fewer
+MySQL `max_connections` setting: although Phorge will make far fewer
connections, the connections it does make will be longer-lived. Raising this
setting will increase MySQL memory requirements and may run into other limits,
like `open_files_limit`, which may also need to be raised.
Persistent connections are enabled per-database. If you always want to use
them, set the flag on each configured database in `cluster.databases`.
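For example, a single-master entry with persistent connections enabled might
look like this (hostname and credentials are illustrative):
```lang=json, name="cluster.databases (Persistent Connections)"
[
  {
    "host": "db001.mycompany.com",
    "role": "master",
    "user": "phorge",
    "pass": "hunter2!trustno1",
    "port": 3306,
    "persistent": true
  }
]
```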
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/cluster/cluster_repositories.diviner b/src/docs/user/cluster/cluster_repositories.diviner
index a2e5fd5b68..2160a73fef 100644
--- a/src/docs/user/cluster/cluster_repositories.diviner
+++ b/src/docs/user/cluster/cluster_repositories.diviner
@@ -1,612 +1,612 @@
@title Cluster: Repositories
@group cluster
-Configuring Phabricator to use multiple repository hosts.
+Configuring Phorge to use multiple repository hosts.
Overview
========
-If you use Git, you can deploy Phabricator with multiple repository hosts,
+If you use Git, you can deploy Phorge with multiple repository hosts,
configured so that each host is readable and writable. The advantages of doing
this are:
- you can completely survive the loss of repository hosts;
- reads and writes can scale across multiple machines; and
- read and write performance across multiple geographic regions may improve.
This configuration is complex, and many installs do not need to pursue it.
This configuration is not currently supported with Subversion or Mercurial.
How Reads and Writes Work
=========================
-Phabricator repository replicas are multi-master: every node is readable and
+Phorge repository replicas are multi-master: every node is readable and
writable, and a cluster of nodes can (almost always) survive the loss of any
arbitrary subset of nodes so long as at least one node is still alive.
-Phabricator maintains an internal version for each repository, and increments
+Phorge maintains an internal version for each repository, and increments
it when the repository is mutated.
Before responding to a read, replicas make sure their version of the repository
is up to date (no node in the cluster has a newer version of the repository).
If it isn't, they block the read until they can complete a fetch.
Before responding to a write, replicas obtain a global lock, perform the same
version check and fetch if necessary, then allow the write to continue.
Additionally, repositories passively check other nodes for updates and
replicate changes in the background. After you push a change to a repository,
it will usually spread passively to all other repository nodes within a few
minutes.
Even if passive replication is slow, the active replication makes acknowledged
changes sequential to all observers: after a write is acknowledged, all
subsequent reads are guaranteed to see it. The system does not permit stale
reads, and you do not need to wait for a replication delay to see a consistent
view of the repository no matter which node you ask.
HTTP vs HTTPS
=============
Intracluster requests (from the daemons to repository servers, or from
webservers to repository servers) are permitted to use HTTP, even if you have
set `security.require-https` in your configuration.
It is common to terminate SSL at a load balancer and use plain HTTP beyond
that, and the `security.require-https` feature is primarily focused on making
client browser behavior more convenient for users, so it does not apply to
intracluster traffic.
Using HTTP within the cluster leaves you vulnerable to attackers who can
observe traffic within a datacenter, or observe traffic between datacenters.
This is normally very difficult, but within reach for state-level adversaries
like the NSA.
If you are concerned about these attackers, you can terminate HTTPS on
repository hosts and bind to them with the "https" protocol. Just be aware that
the `security.require-https` setting won't prevent you from making
configuration mistakes, as it doesn't cover intracluster traffic.
Other mitigations are possible, but securing a network against the NSA and
similar agents of other rogue nations is beyond the scope of this document.
Repository Hosts
================
-Repository hosts must run a complete, fully configured copy of Phabricator,
+Repository hosts must run a complete, fully configured copy of Phorge,
including a webserver. They must also run a properly configured `sshd`.
If you are converting existing hosts into cluster hosts, you may need to
revisit @{article:Diffusion User Guide: Repository Hosting} and make sure
the system user accounts have all the necessary `sudo` permissions. In
particular, cluster devices need `sudo` access to `ssh` so they can read
device keys.
Generally, these hosts will run the same set of services and configuration that
web hosts run. If you prefer, you can overlay these services and put web and
repository services on the same hosts. See @{article:Clustering Introduction}
for some guidance on overlaying services.
When a user requests information about a repository that can only be satisfied
by examining a repository working copy, the webserver receiving the request
will make an HTTP service call to a repository server which hosts the
repository to retrieve the data it needs. It will use the result of this query
to respond to the user.
Setting up Cluster Services
=============================
To set up clustering, first register the devices that you want to use as part
of the cluster with Almanac. For details, see @{article:Cluster: Devices}.
NOTE: Once you create a service, new repositories will immediately allocate
on it. You may want to disable repository creation during initial setup.
NOTE: To create clustered services, your account must have the "Can Manage
Cluster Services" capability. By default, no accounts have this capability,
and you must enable it by changing the configuration of the Almanac
application. Navigate to the Almanac application configuration as follows:
{nav icon=home, name=Home >
Applications >
Almanac >
Configure >
Edit Policies >
Can Manage Cluster Services }
Once the hosts are registered as devices, you can create a new service in
Almanac:
- First, register at least one device according to the device clustering
instructions.
- - Create a new service of type **Phabricator Cluster: Repository** in
+ - Create a new service of type **Phorge Cluster: Repository** in
Almanac.
- Bind this service to all the interfaces on the device or devices.
- For each binding, add a `protocol` key with one of these values:
`ssh`, `http`, `https`.
For example, a service might look like this:
- Service: `repos001.mycompany.net`
- Binding: `repo001.mycompany.net:80`, `protocol=http`
- Binding: `repo001.mycompany.net:2222`, `protocol=ssh`
The service itself has a `closed` property. You can set this to `true` to
disable new repository allocations on this service (for example, if it is
reaching capacity).
Migrating to Clustered Services
===============================
To convert existing repositories on an install into cluster repositories, you
will generally perform these steps:
- Register the existing host as a cluster device.
- Configure a single host repository service using //only// that host.
This puts you in a transitional state where repositories on the host can work
as either on-host repositories or cluster repositories. You can move forward
from here slowly and make sure services still work, with a quick path back to
safety if you run into trouble.
To move forward, migrate one repository to the service and make sure things
work correctly. If you run into issues, you can back out by migrating the
repository off the service.
To migrate a repository onto a cluster service, use this command:
```
$ ./bin/repository clusterize <repository> --service <service>
```
To migrate a repository back off a service, use this command:
```
$ ./bin/repository clusterize <repository> --remove-service
```
-This command only changes how Phabricator connects to the repository; it does
+This command only changes how Phorge connects to the repository; it does
not move any data or make any complex structural changes.
-When Phabricator needs information about a non-clustered repository, it just
-runs a command like `git log` directly on disk. When Phabricator needs
+When Phorge needs information about a non-clustered repository, it just
+runs a command like `git log` directly on disk. When Phorge needs
information about a clustered repository, it instead makes a service call to
another server, asking that server to run `git log` instead.
In a single-host cluster the server will make this service call to itself, so
nothing will really change. But this //is// an effective test for most
possible configuration mistakes.
If your canary repository works well, you can migrate the rest of your
repositories when ready (you can use `bin/repository list` to quickly get a
list of all repository monograms).
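For example, a rough bulk-migration loop might look like the sketch below. This
is not an official workflow: check the output format of `bin/repository list`
on your install first (the sketch assumes the monogram appears in the first
column), and substitute your own service name for `repos001.mycompany.net`.
```
$ ./bin/repository list | awk '{print $1}' | while read repo; do
    ./bin/repository clusterize "$repo" --service repos001.mycompany.net
  done
```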
Once all repositories are migrated, you've reached a stable state and can
remain here as long as you want. This state is sufficient to convert daemons,
SSH, and web services into clustered versions and spread them across multiple
machines if those goals interest you.
Obviously, your single-device "cluster" will not be able to survive the loss of
the single repository host, but you can take as long as you want to expand the
cluster and add redundancy.
After creating a service, you do not need to `clusterize` new repositories:
they will automatically allocate onto an open service.
When you're ready to expand the cluster, continue below.
Expanding a Cluster
===================
To expand an existing cluster, follow these general steps:
- Register new devices in Almanac.
- Add bindings to the new devices to the repository service, also in Almanac.
- Start the daemons on the new devices.
For instructions on configuring and registering devices, see
@{article:Cluster: Devices}.
-As soon as you add active bindings to a service, Phabricator will begin
+As soon as you add active bindings to a service, Phorge will begin
synchronizing repositories and sending traffic to the new device. You do not
-need to copy any repository data to the device: Phabricator will automatically
+need to copy any repository data to the device: Phorge will automatically
synchronize it.
If you have a large amount of repository data, you may want to help this
process along by copying the repository directory from an existing cluster
device before bringing the new host online. This is optional, but can reduce
the amount of time required to fully synchronize the cluster.
You do not need to synchronize the most up-to-date data or stop writes during
this process. For example, loading the most recent backup snapshot onto the new
device will substantially reduce the amount of data that needs to be
synchronized.
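For example, a hedged sketch of pre-seeding a new device with `rsync`, run from
an existing cluster device. It assumes repositories live under the default
`/var/repo/` storage path (verify with `bin/repository list-paths`) and that
`repo002.mycompany.net` is the new device:
```
$ rsync -az /var/repo/ repo002.mycompany.net:/var/repo/
```
Anything stale or missing after the copy will be picked up by normal cluster
synchronization once the device comes online.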
Contracting a Cluster
=====================
If you want to remove working devices from a cluster (for example, to take
hosts down for maintenance), first do this for each device:
- Change the `writable` property on the bindings to "Prevent Writes".
- Wait a few moments until the cluster synchronizes (see
"Monitoring Services" below).
This will ensure that the device you're about to remove is not the only cluster
leader, even if the cluster is receiving a high write volume. You can skip this
step if the device isn't working properly to start with.
Once you've stopped writes and waited for synchronization (or if the hosts are
not working in the first place) do this for each device:
- Disable the bindings from the service to the device in Almanac.
If you are removing a device because it failed abruptly (or removing several
devices at once; or you skip the "Prevent Writes" step), it is possible that
some repositories will have lost all their leaders. See "Loss of Leaders" below
to understand and resolve this.
If you want to put the hosts back in service later:
- Enable the bindings again.
- Change `writable` back to "Allow Writes".
This will restore the cluster to the original state.
Monitoring Services
===================
You can get an overview of repository cluster status from the
{nav Config > Repository Servers} screen. This table shows a high-level
overview of all active repository services.
**Repos**: The number of repositories hosted on this service.
**Sync**: Synchronization status of repositories on this service. This is an
at-a-glance view of service health, and can show these values:
- **Synchronized**: All nodes are fully synchronized and have the latest
version of all repositories.
- **Partial**: All repositories either have at least two leaders, or have
a very recent write which is not expected to have propagated yet.
- **Unsynchronized**: At least one repository has changes which are
only available on one node and were not pushed very recently. Data may
be at risk.
- **No Repositories**: This service has no repositories.
- **Ambiguous Leader**: At least one repository has an ambiguous leader.
If this screen identifies problems, you can drill down into repository details
to get more information about them. See the next section for details.
Monitoring Repositories
=======================
You can get a more detailed view of the current status of a specific repository
on
cluster devices in {nav Diffusion > (Repository) > Manage Repository > Cluster
Configuration}.
This screen shows all the configured devices which are hosting the repository
and the available version on that device.
-**Version**: When a repository is mutated by a push, Phabricator increases
+**Version**: When a repository is mutated by a push, Phorge increases
an internal version number for the repository. This column shows which version
is on disk on the corresponding device.
After a change is pushed, the device which received the change will have a
larger version number than the other devices. The change should be passively
replicated to the remaining devices after a brief period of time, although this
can take a while if the change was large or the network connection between
devices is slow or unreliable.
You can click the version number to see the corresponding push logs for that
change. The logs contain details about what was changed, and can help you
identify if replication is slow because a change is large or for some other
reason.
**Writing**: This shows that the device is currently holding a write lock. This
normally means that it is actively receiving a push, but can also mean that
there was a write interruption. See "Write Interruptions" below for details.
**Last Writer**: This column identifies the user who most recently pushed a
change to this device. If the write lock is currently held, this user is
the user whose change is holding the lock.
**Last Write At**: When the most recent write started. If the write lock is
currently held, this shows when the lock was acquired.
Cluster Failure Modes
=====================
There are three major cluster failure modes:
- **Write Interruptions**: A write started but did not complete, leaving
the disk state and cluster state out of sync.
- **Loss of Leaders**: None of the devices with the most up-to-date data
are reachable.
- **Ambiguous Leaders**: The internal state of the repository is unclear.
-Phabricator can detect these issues, and responds by freezing the repository
+Phorge can detect these issues, and responds by freezing the repository
(usually preventing all reads and writes) until the issue is resolved. These
-conditions are normally rare and very little data is at risk, but Phabricator
+conditions are normally rare and very little data is at risk, but Phorge
errs on the side of caution and requires decisions which may result in data
loss to be confirmed by a human.
The next sections cover these failure modes and appropriate responses in
more detail. In general, you will respond to these issues by assessing the
situation and then possibly choosing to discard some data.
Write Interruptions
===================
A repository cluster can be put into an inconsistent state by an interruption
in a brief window during and immediately after a write. This looks like this:
- A change is pushed to a server.
- The server acquires a write lock and begins writing the change.
- During or immediately after the write, lightning strikes the server
and destroys it.
-Phabricator can not commit changes to a working copy (stored on disk) and to
+Phorge can not commit changes to a working copy (stored on disk) and to
the global state (stored in a database) atomically, so there is necessarily a
narrow window between committing these two different states when some tragedy
can befall a server, leaving the global and local views of the repository state
possibly divergent.
-In these cases, Phabricator fails into a frozen state where further writes
+In these cases, Phorge fails into a frozen state where further writes
are not permitted until the failure is investigated and resolved. When a
repository is frozen in this way it remains readable.
You can use the monitoring console to review the state of a frozen repository
with a held write lock. The **Writing** column will show which device is
holding the lock, and whoever is named in the **Last Writer** column may be
able to help you figure out what happened by providing more information about
what they were doing and what they observed.
Because the push was not acknowledged, it is normally safe to resolve this
issue by demoting the device. Demoting the device will undo any changes
committed by the push, and they will be lost forever.
However, the user should have received an error anyway, and should not expect
their push to have worked. Still, data is technically at risk and you may want
to investigate further and try to understand the issue in more detail before
continuing.
There is no way to explicitly keep the write, but if it was committed to disk
you can recover it manually from the working copy on the device (for example,
by using `git format-patch`) and then push it again after recovering.
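For example, a hedged sketch of that manual recovery, run on the affected
device from the repository's storage directory on disk (the path and the
commit range are placeholders; `bin/repository list-paths` shows real paths):
```
$ cd /var/repo/<repository>
$ git format-patch <last-replicated-commit>..<branch> --stdout > recovered.patch
```
After the cluster is healthy again, you can apply the patch to a normal clone
and push it as usual.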
If you demote the device, the in-process write will be thrown away, even if it
was complete on disk. To demote the device and release the write lock, run this
command:
```
-phabricator/ $ ./bin/repository thaw <repository> --demote <device>
+phorge/ $ ./bin/repository thaw <repository> --demote <device>
```
{icon exclamation-triangle, color="yellow"} Any committed but unacknowledged
data on the device will be lost.
Loss of Leaders
===============
A more straightforward failure condition is the loss of all servers in a
cluster which have the most up-to-date copy of a repository. This looks like
this:
- There is a cluster setup with two devices, X and Y.
- A new change is pushed to server X.
- Before the change can propagate to server Y, lightning strikes server X
and destroys it.
Here, all of the "leader" devices with the most up-to-date copy of the
-repository have been lost. Phabricator will freeze the repository refuse to
+repository have been lost. Phorge will freeze the repository and refuse to
serve requests because it can not serve reads consistently and can not accept
new writes without data loss.
The most straightforward way to resolve this issue is to restore any leader to
service. The change will be able to replicate to other devices once a leader
comes back online.
If you are unable to restore a leader or unsure that you can restore one
quickly, you can use the monitoring console to review which changes are
present on the leaders but not present on the followers by examining the
push logs.
-If you are comfortable discarding these changes, you can instruct Phabricator
+If you are comfortable discarding these changes, you can instruct Phorge
that it can forget about the leaders by doing this:
- Disable the service bindings to all of the leader devices so they are no
longer part of the cluster.
- Then, use `bin/repository thaw` to `--demote` the leaders explicitly.
To demote a device, run this command:
```
-phabricator/ $ ./bin/repository thaw rXYZ --demote repo002.corp.net
+phorge/ $ ./bin/repository thaw rXYZ --demote repo002.corp.net
```
{icon exclamation-triangle, color="red"} Any data which is only present on
the demoted device will be lost.
If you do this, **you will lose unreplicated data**. You will discard any
changes on the affected leaders which have not replicated to other devices
in the cluster.
If you have lost an entire cluster and replaced it with new devices that you
have restored from backups, you can aggressively wipe all memory of the old
devices by using `--demote <service>` and `--all-repositories`. **This is
dangerous and discards all unreplicated data in any repository on any device.**
```
-phabricator/ $ ./bin/repository thaw --demote repo.corp.net --all-repositories
+phorge/ $ ./bin/repository thaw --demote repo.corp.net --all-repositories
```
After you do this, continue below to promote a leader and restore the cluster
to service.
Ambiguous Leaders
=================
Repository clusters can also freeze if the leader devices are ambiguous. This
can happen if you replace an entire cluster with new devices suddenly, or make
a mistake with the `--demote` flag. This may arise from some kind of operator
error, like these:
- Someone accidentally uses `bin/repository thaw ... --demote` to demote
every device in a cluster.
- Someone accidentally deletes all the version information for a repository
from the database by making a mistake with a `DELETE` or `UPDATE` query.
- Someone accidentally disables all of the devices in a cluster, then adds
entirely new ones before repositories can propagate.
If you are moving repositories into cluster services, you can also reach this
state if you use `clusterize` to associate a repository with a service that is
-bound to multiple active devices. In this case, Phabricator will not know which
+bound to multiple active devices. In this case, Phorge will not know which
device or devices have up-to-date information.
-When Phabricator can not tell which device in a cluster is a leader, it freezes
+When Phorge can not tell which device in a cluster is a leader, it freezes
the cluster because it is possible that some devices have less data and others
have more, and if it chooses a leader arbitrarily it may destroy some data
which you would prefer to retain.
-To resolve this, you need to tell Phabricator which device has the most
+To resolve this, you need to tell Phorge which device has the most
up-to-date data and promote that device to become a leader. If you know all
devices have the same data, you are free to promote any device.
If you promote a device, **you may lose data** if you promote the wrong device
and some other device really had more up-to-date data. If you want to double
check, you can examine the working copies on disk before promoting by
connecting to the machines and using commands like `git log` to inspect state.
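For example, one way to compare devices is to list the branch heads in each
working copy and check whether any device has commits the others lack (the
path is a placeholder; `bin/repository list-paths` shows the real one):
```
$ cd /var/repo/<repository>
$ git for-each-ref --format='%(objectname) %(refname)' refs/heads/
```
A device whose refs point at commits that are missing from the other devices
is the best candidate for promotion.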
Once you have identified a device which has data you're happy with, use
`bin/repository thaw` to `--promote` the device. The data on the chosen
device will become authoritative:
```
-phabricator/ $ ./bin/repository thaw rXYZ --promote repo002.corp.net
+phorge/ $ ./bin/repository thaw rXYZ --promote repo002.corp.net
```
{icon exclamation-triangle, color="red"} Any data which is only present on
**other** devices will be lost.
Backups
======
Even if you configure clustering, you should still consider retaining separate
backup snapshots. Replicas protect you from data loss if you lose a host, but
they do not let you rewind time to recover from data mutation mistakes.
If something issues a `--force` push that destroys branch heads, the mutation
will propagate to the replicas.
You may be able to manually restore the branches by using tools like the
-Phabricator push log or the Git reflog so it is less important to retain
+Phorge push log or the Git reflog, so it is less important to retain
repository snapshots than database snapshots, but it is still possible for
data to be lost permanently, especially if you don't notice the problem for
some time.
Retaining separate backup snapshots will improve your ability to recover more
data more easily in a wider range of disaster situations.
Ad-Hoc Maintenance Locks
========================
Occasionally, you may want to perform maintenance to a clustered repository
which requires you modify the actual content of the repository.
For example: you might want to delete a large number of old or temporary
branches; or you might want to merge a very large number of commits from
another source.
These operations may be prohibitively slow or complex to perform using normal
pushes. In cases where you would prefer to directly modify a working copy, you
can use a maintenance lock to safely make a working copy mutable.
If you simply perform this kind of content-modifying maintenance by directly
modifying the repository on disk with commands like `git update-ref`, your
changes may either encounter conflicts or encounter problems with change
propagation.
You can encounter conflicts because directly modifying the working copy on disk
-won't prevent users or Phabricator itself from performing writes to the same
-working copy at the same time. Phabricator does not compromise the lower-level
+won't prevent users or Phorge itself from performing writes to the same
+working copy at the same time. Phorge does not compromise the lower-level
locks provided by the VCS so this is theoretically safe -- and this rarely
causes any significant problems in practice -- but doesn't make things any
simpler or easier.
Your changes may fail to propagate because writing directly to the repository
doesn't turn it into the new cluster leader after your writes complete. If
another node accepts the next push, it will become the new leader -- without
your changes -- and all other nodes will synchronize from it.
Note that some maintenance operations (like `git gc`, `git prune`, or
`git repack`) do not modify repository content. In theory, these operations do
not require a maintenance lock: lower-level Git locks should protect
them from conflicts, and they can not be affected by propagation issues because
they do not propagate. In practice, these operations are not conflict-free in
all circumstances. Using a maintenance lock may be overkill, but it's probably
still a good idea.
To use a maintenance lock:
- Open two terminal windows. You'll use one window to hold the lock and a
second window to perform maintenance.
- Run `bin/repository lock <repository> ...` in one terminal.
- When the process reports that repositories are locked, switch to the second
terminal and perform maintenance. The `repository lock` process should
still be running in your first terminal.
- After maintenance completes, switch back to the first terminal and answer
the prompt to confirm maintenance is complete.
The workflow looks something like this:
```
$ ./bin/repository lock R2
These repositories will be locked:
- R2 Git Test Repository
While the lock is held: users will be unable to write to this repository,
and you may safely perform working copy maintenance on this node in another
terminal window.
Lock repositories and begin maintenance? [y/N] y
Repositories are now locked. You may begin maintenance in another terminal
window. Keep this process running until you complete the maintenance, then
confirm that you are ready to release the locks.
Ready to release the locks? [y/N] y
Done.
```
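For example, the maintenance you perform in the second terminal while the lock
is held might be a direct branch cleanup like the sketch below. The path and
branch names are hypothetical; `bin/repository list-paths` shows the real
storage path:
```
$ cd /var/repo/<repository>
$ git branch -D temp/old-branch-1 temp/old-branch-2
```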
As maintenance completes, the push log for the repository will be updated to
reflect that you performed maintenance.
If the lock is interrupted, you may encounter a "Write Interruptions" condition
described earlier in this document. See that section for details. In most
cases, you can resolve this issue by demoting the node you are working on.
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/cluster/cluster_search.diviner b/src/docs/user/cluster/cluster_search.diviner
index 25c35aa34a..4bbd1c2fbd 100644
--- a/src/docs/user/cluster/cluster_search.diviner
+++ b/src/docs/user/cluster/cluster_search.diviner
@@ -1,210 +1,210 @@
@title Cluster: Search
@group cluster
Overview
========
-You can configure Phabricator to connect to one or more fulltext search
+You can configure Phorge to connect to one or more fulltext search
services.
-By default, Phabricator will use MySQL for fulltext search. This is suitable
+By default, Phorge will use MySQL for fulltext search. This is suitable
for most installs. However, alternate engines are supported.
Configuring Search Services
===========================
To configure search services, adjust the `cluster.search` configuration
option. This option contains a list of one or more fulltext search services,
like this:
```lang=json
[
{
"type": "...",
"hosts": [
...
],
"roles": {
"read": true,
"write": true
}
}
]
```
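Like other cluster options, `cluster.search` is normally set from the CLI with
`bin/config`. A minimal sketch, assuming you saved the configuration above as
a local `search.json` file (the filename is arbitrary):
```
phorge/ $ ./bin/config set cluster.search --stdin < search.json
```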
-When a user makes a change to a document, Phabricator writes the updated
+When a user makes a change to a document, Phorge writes the updated
document into every configured, writable fulltext service.
-When a user issues a query, Phabricator tries configured, readable services
+When a user issues a query, Phorge tries configured, readable services
in order until it is able to execute the query successfully.
These options are supported by all service types:
| Key | Description |
|---|---|
| `type` | Constant identifying the service type, like `mysql`.
| `roles` | Dictionary of role settings, for enabling reads and writes.
| `hosts` | List of hosts for this service.
Some service types support additional options.
Available Service Types
=======================
These service types are supported:
| Service | Key | Description |
|---|---|---|
| MySQL | `mysql` | Default MySQL fulltext index.
| Elasticsearch | `elasticsearch` | Use an external Elasticsearch service
Fulltext Service Roles
======================
These roles are supported:
| Role | Key | Description
|---|---|---|
| Read | `read` | Allows the service to be queried when users search.
| Write | `write` | Allows documents to be published to the service.
Specifying Hosts
================
The `hosts` key should contain a list of dictionaries, each specifying the
details of a host. A service should normally have one or more hosts.
When an option is set at the service level, it serves as a default for all
hosts. It may be overridden by changing the value for a particular host.
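For example, this sketch (with hypothetical hostnames) sets `protocol` and
`port` once at the service level and overrides the port for a single host:
```lang=json
{
  "type": "elasticsearch",
  "protocol": "https",
  "port": 9200,
  "hosts": [
    {
      "host": "search001.mycompany.com"
    },
    {
      "host": "search002.mycompany.com",
      "port": 9201
    }
  ]
}
```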
Service Type: MySQL
==============
The `mysql` service type does not require any configuration, and does not
need to have hosts specified. This service uses the builtin database to
index and search documents.
A typical `mysql` service configuration looks like this:
```lang=json
{
"type": "mysql"
}
```
Service Type: Elasticsearch
======================
The `elasticsearch` service type supports these options:
| Key | Description |
|---|---|
| `protocol` | Either `"http"` (default) or `"https"`.
| `port` | Elasticsearch TCP port.
| `version` | Elasticsearch version, either `2` or `5` (default).
| `path` | Path for the index. Defaults to `/phabricator`. Advanced.
A typical `elasticsearch` service configuration looks like this:
```lang=json
{
"type": "elasticsearch",
"hosts": [
{
"protocol": "http",
"host": "127.0.0.1",
"port": 9200
}
]
}
```
Monitoring Search Services
==========================
You can monitor fulltext search in {nav Config > Search Servers}. This
interface shows you a quick overview of services and their health.
The table on this page shows some basic stats for each configured service,
followed by the configuration and current status of each host.
Rebuilding Indexes
==================
After adding new search services, you will need to rebuild document indexes
on them. To do this, first initialize the services:
```
-phabricator/ $ ./bin/search init
+phorge/ $ ./bin/search init
```
This will perform index setup steps and other one-time configuration.
To populate documents in all indexes, run this command:
```
-phabricator/ $ ./bin/search index --force --background --type all
+phorge/ $ ./bin/search index --force --background --type all
```
This initiates an exhaustive rebuild of the document indexes. To get a more
detailed list of indexing options available, run:
```
-phabricator/ $ ./bin/search help index
+phorge/ $ ./bin/search help index
```
Advanced Example
================
This is a more advanced example which shows a configuration with multiple
different services in different roles. In this example:
- - Phabricator is using an Elasticsearch 2 service as its primary fulltext
+ - Phorge is using an Elasticsearch 2 service as its primary fulltext
service.
- An Elasticsearch 5 service is online, but only receiving writes.
- The MySQL service is serving as a backup if Elasticsearch fails.
This particular configuration may not be very useful. It is primarily
intended to show how to configure many different options.
```lang=json
[
{
"type": "elasticsearch",
"version": 2,
"hosts": [
{
"host": "elastic2.mycompany.com",
"port": 9200,
"protocol": "http"
}
]
},
{
"type": "elasticsearch",
"version": 5,
"hosts": [
{
"host": "elastic5.mycompany.com",
"port": 9789,
"protocol": "https"
"roles": {
"read": false,
"write": true
}
}
]
},
{
"type": "mysql"
}
]
```
diff --git a/src/docs/user/cluster/cluster_ssh.diviner b/src/docs/user/cluster/cluster_ssh.diviner
index bdd41776f5..57551309a6 100644
--- a/src/docs/user/cluster/cluster_ssh.diviner
+++ b/src/docs/user/cluster/cluster_ssh.diviner
@@ -1,47 +1,47 @@
@title Cluster: SSH Servers
@group cluster
-Configuring Phabricator to use multiple SSH servers.
+Configuring Phorge to use multiple SSH servers.
Overview
========
-You can run Phabricator on multiple SSH servers. The advantages of doing this
+You can run Phorge on multiple SSH servers. The advantages of doing this
are:
- you can completely survive the loss of multiple SSH hosts.
This configuration is simple, but you must configure repositories first. For
details, see @{article:Cluster: Repositories}.
SSH servers accept SSH requests from commands like `git clone` and relay them
to hosts that can serve the requests.
Adding SSH Hosts
================
After configuring repositories in cluster mode, you can add more web hosts
at any time.
-First, deploy the Phabricator software and configuration to a host, then
+First, deploy the Phorge software and configuration to a host, then
register the host as a cluster device if it is not already registered (for
help, see @{article:Cluster: Devices}.)
Once the host is registered, start the SSH server, and then add the host to the
SSH load balancer pool.
-Phabricator SSH servers are stateless, so you can pull them in and out of
+Phorge SSH servers are stateless, so you can pull them in and out of
production freely.
You may also want to run web services on these hosts, since the service is very
similar to SSH, also stateless, and it may be simpler to load balance the
services together. For details, see @{article:Cluster: Web Servers}.
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/cluster/cluster_webservers.diviner b/src/docs/user/cluster/cluster_webservers.diviner
index 7c7c3e8b1f..7483e8f6cd 100644
--- a/src/docs/user/cluster/cluster_webservers.diviner
+++ b/src/docs/user/cluster/cluster_webservers.diviner
@@ -1,45 +1,45 @@
@title Cluster: Web Servers
@group cluster
-Configuring Phabricator to use multiple web servers.
+Configuring Phorge to use multiple web servers.
Overview
========
-You can run Phabricator on multiple web servers. The advantages of doing this
+You can run Phorge on multiple web servers. The advantages of doing this
are:
- you can completely survive the loss of multiple web hosts; and
- performance and capacity may improve.
This configuration is simple, but you must configure repositories first. For
details, see @{article:Cluster: Repositories}.
Adding Web Hosts
================
After configuring repositories in cluster mode, you can add more web hosts
at any time.
-First, deploy the Phabricator software and configuration to a host, then
+First, deploy the Phorge software and configuration to a host, then
register the host as a cluster device if it is not already registered (for
help, see @{article:Cluster: Devices}.)
Once the host is registered, start the web server, and then add the host to the
load balancer pool.
-Phabricator web servers are stateless, so you can pull them in and out of
+Phorge web servers are stateless, so you can pull them in and out of
production freely.
You may also want to run SSH services on these hosts, since the service is very
similar to HTTP, also stateless, and it may be simpler to load balance the
services together. For details, see @{article:Cluster: SSH Servers}.
Next Steps
==========
Continue by:
- returning to @{article:Clustering Introduction}.
diff --git a/src/docs/user/configuration/advanced_configuration.diviner b/src/docs/user/configuration/advanced_configuration.diviner
index 5721ffd597..8c1389d992 100644
--- a/src/docs/user/configuration/advanced_configuration.diviner
+++ b/src/docs/user/configuration/advanced_configuration.diviner
@@ -1,117 +1,117 @@
@title Configuration User Guide: Advanced Configuration
@group config
-Configuring Phabricator for multiple environments.
+Configuring Phorge for multiple environments.
= Overview =
-Phabricator reads configuration from multiple sources. This document explains
+Phorge reads configuration from multiple sources. This document explains
the configuration stack and how to set up advanced configuration sources, which
may be useful for deployments with multiple environments (e.g., development and
production).
This is a complicated topic for advanced users. You do not need to understand
-this topic to install Phabricator.
+this topic to install Phorge.
= Configuration Sources =
-Phabricator supports the following configuration sources, from highest priority
+Phorge supports the following configuration sources, from highest priority
to lowest priority:
- **Database**: Values are stored in the database and edited from the web UI
by administrators. They have the highest priority and override other
settings.
- **Local**: Values are stored in `conf/local/config.json` and edited by
running `bin/config`.
- **Config Files**: Values are stored in a config file in `conf/`. The file
to use is selected by writing to `conf/local/ENVIRONMENT`, or setting the
`PHABRICATOR_ENV` configuration variable. See below for more information.
- - **Defaults**: Defaults hard-coded in the Phabricator source, which can not
+ - **Defaults**: Defaults hard-coded in the Phorge source, which can not
be edited. They have the lowest priority, and all other settings override
them.
-Normally, you install and configure Phabricator by writing enough configuration
+Normally, you install and configure Phorge by writing enough configuration
into the local config to get access to the database configuration (e.g., the
MySQL username, password, and hostname), then use the web interface to further
-configure Phabricator.
+configure Phorge.
= Configuration Files =
Configuration files provide an alternative to database configuration, and may be
appropriate if you want to deploy in multiple environments or create dynamic
configuration. Configuration files are more complicated than database
configuration, which is why they are not used by default.
== Creating a Configuration File ==
To create a configuration file, first choose a name for the config (like
"devserver" or "live"). For the purposes of this section, we'll assume you chose
`exampleconfig`. Replace "exampleconfig" with whatever you actually chose in the
examples below.
First, write an `exampleconfig.conf.php` file here (rename it according to the
name you chose):
- phabricator/conf/custom/exampleconfig.conf.php
+ phorge/conf/custom/exampleconfig.conf.php
Its contents should look like this:
<?php
return array(
// Specify whichever keys and values you want to set.
'example.key' => 'examplevalue',
);
For example, to specify MySQL credentials in your config file, you might create
a config like this:
<?php
return array(
'mysql.host' => 'localhost',
'mysql.user' => 'root',
'mysql.pass' => 'hunter2trustno1',
);
== Selecting a Configuration File ==
To select a configuration file, write the name of the file (relative to
-`phabricator/conf/`) to `phabricator/conf/local/ENVIRONMENT`. For example, to
-select `phabricator/conf/custom/exampleconfig.conf.php`, you would write
-"custom/exampleconfig" to `phabricator/conf/local/ENVIRONMENT`:
+`phorge/conf/`) to `phorge/conf/local/ENVIRONMENT`. For example, to
+select `phorge/conf/custom/exampleconfig.conf.php`, you would write
+"custom/exampleconfig" to `phorge/conf/local/ENVIRONMENT`:
- phabricator/ $ echo custom/exampleconfig > conf/local/ENVIRONMENT
- phabricator/ $ cat conf/local/ENVIRONMENT
+ phorge/ $ echo custom/exampleconfig > conf/local/ENVIRONMENT
+ phorge/ $ cat conf/local/ENVIRONMENT
custom/exampleconfig
- phabricator/ $
+ phorge/ $
You can also set the environment variable `PHABRICATOR_ENV`. This is more
involved but may be easier in some deployment environments. Note that this needs
to be set in your webserver environment, and also in your shell whenever you
run a script:
```
# Shell
export PHABRICATOR_ENV=custom/exampleconfig
# Apache
SetEnv PHABRICATOR_ENV custom/exampleconfig
# nginx
fastcgi_param PHABRICATOR_ENV "custom/exampleconfig";
# lighttpd
setenv.add-environment = (
"PHABRICATOR_ENV" => "custom/exampleconfig",
)
```
-After creating and selecting a configuration file, restart Phabricator (for
-help, see @{article:Restarting Phabricator}). Any configuration you set should
+After creating and selecting a configuration file, restart Phorge (for
+help, see @{article:Restarting Phorge}). Any configuration you set should
take effect immediately, and your file should be visible in the Config
application when examining configuration.
= Next Steps =
Return to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuration_guide.diviner b/src/docs/user/configuration/configuration_guide.diviner
index 8b221bda41..7798ff0e0b 100644
--- a/src/docs/user/configuration/configuration_guide.diviner
+++ b/src/docs/user/configuration/configuration_guide.diviner
@@ -1,212 +1,212 @@
@title Configuration Guide
@group config
-This document contains basic configuration instructions for Phabricator.
+This document contains basic configuration instructions for Phorge.
= Prerequisites =
This document assumes you've already installed all the components you need.
If you haven't, see @{article:Installation Guide}.
The next steps are:
- Configure your webserver (Apache, nginx, or lighttpd).
- - Access Phabricator with your browser.
+ - Access Phorge with your browser.
- Follow the instructions to complete setup.
= Webserver: Configuring Apache =
NOTE: Follow these instructions to use Apache. To use nginx or lighttpd, scroll
down to their sections.
Get Apache running and verify it's serving a test page. Consult the Apache
documentation for help. Make sure `mod_php` and `mod_rewrite` are enabled,
and `mod_ssl` if you intend to set up SSL.
If you haven't already, set up a domain name to point to the host you're
-installing on. You can either install Phabricator on a subdomain (like
-phabricator.example.com) or an entire domain, but you can not install it in
+installing on. You can either install Phorge on a subdomain (like
+phorge.example.com) or an entire domain, but you can not install it in
some subdirectory of an existing website. Navigate to whatever domain you're
going to use and make sure Apache serves you something to verify that DNS
is correctly configured.
NOTE: The domain must contain a dot ('.'), i.e. not be just a bare name like
'http://example/'. Some web browsers will not set cookies otherwise.
-Now create a VirtualHost entry for Phabricator. It should look something like
+Now create a VirtualHost entry for Phorge. It should look something like
this:
name=httpd.conf
<VirtualHost *>
# Change this to the domain which points to your host.
- ServerName phabricator.example.com
+ ServerName phorge.example.com
- # Change this to the path where you put 'phabricator' when you checked it
- # out from GitHub when following the Installation Guide.
+ # Change this to the path where you put 'phorge' when you checked it
+ # out from the upstream when following the Installation Guide.
#
# Make sure you include "/webroot" at the end!
- DocumentRoot /path/to/phabricator/webroot
+ DocumentRoot /path/to/phorge/webroot
RewriteEngine on
RewriteRule ^(.*)$ /index.php?__path__=$1 [B,L,QSA]
</VirtualHost>
If Apache isn't currently configured to serve documents out of the directory
-where you put Phabricator, you may also need to add `<Directory />` section. The
+where you put Phorge, you may also need to add a `<Directory />` section. The
syntax for this section depends on which version of Apache you're running.
(If you don't know, you can usually figure this out by running `httpd -v`.)
For Apache versions older than 2.4, use this:
name="Apache Older Than 2.4"
- <Directory "/path/to/phabricator/webroot">
+ <Directory "/path/to/phorge/webroot">
Order allow,deny
Allow from all
</Directory>
For Apache versions 2.4 and newer, use this:
name="Apache 2.4 and Newer"
- <Directory "/path/to/phabricator/webroot">
+ <Directory "/path/to/phorge/webroot">
Require all granted
</Directory>
After making your edits, restart Apache, then continue to "Setup" below.
= Webserver: Configuring nginx =
NOTE: Follow these instructions to use nginx. To use Apache or lighttpd, scroll
to their sections.
For nginx, use a configuration like this:
name=nginx.conf
server {
- server_name phabricator.example.com;
- root /path/to/phabricator/webroot;
+ server_name phorge.example.com;
+ root /path/to/phorge/webroot;
location / {
index index.php;
rewrite ^/(.*)$ /index.php?__path__=/$1 last;
}
location /index.php {
fastcgi_pass localhost:9000;
fastcgi_index index.php;
#required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;
#variables to make the $_SERVER populate in PHP
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
}
}
Restart nginx after making your edits, then continue to "Setup" below.
= Webserver: Configuring lighttpd =
NOTE: Follow these instructions to use lighttpd. To use Apache or nginx, scroll
up to their sections.
For lighttpd, add a section like this to your lighttpd.conf:
- $HTTP["host"] =~ "phabricator(\.example\.com)?" {
- server.document-root = "/path/to/phabricator/webroot"
+ $HTTP["host"] =~ "phorge(\.example\.com)?" {
+ server.document-root = "/path/to/phorge/webroot"
url.rewrite-once = (
# This simulates QSA ("query string append") mode in apache
"^(/[^?]*)\?(.*)" => "/index.php?__path__=$1&$2",
"^(/.*)$" => "/index.php?__path__=$1",
)
}
You should also ensure the following modules are listed in your
server.modules list:
mod_fastcgi
mod_rewrite
Finally, you should run the following commands to enable php support:
$ sudo apt-get install php5-cgi # for Ubuntu; other distros should be similar
$ sudo lighty-enable-mod fastcgi-php
Restart lighttpd after making your edits, then continue below.
Load Balancer Health Checks
===========================
If you're using a load balancer in front of your webserver, you can configure
it to perform health checks using the path `/status/`.
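For example, a simple health check can just request that path and expect an
HTTP 200 response. A sketch, using the placeholder hostname from the examples
above:
  $ curl -i http://phorge.example.com/status/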
= Setup =
Now, navigate to whichever subdomain you set up. You should see instructions to
continue setup. The rest of this document contains additional instructions for
specific setup steps.
When you resolve any issues and see the welcome screen, enter credentials to
create your initial administrator account. After you log in, you'll want to
configure how other users will be able to log in or register -- until you do,
no one else will be able to sign up or log in. For more information, see
@{article:Configuring Accounts and Registration}.
= Storage: Configuring MySQL =
During setup, you'll need to configure MySQL. To do this, get MySQL running and
verify you can connect to it. Consult the MySQL documentation for help. When
-MySQL works, you need to load the Phabricator schemata into it. To do this, run:
+MySQL works, you need to load the Phorge schemata into it. To do this, run:
- phabricator/ $ ./bin/storage upgrade
+ phorge/ $ ./bin/storage upgrade
If your configuration uses an unprivileged user to connect to the database, you
may have to override the default user so the schema changes can be applied with
root or some other admin user:
- phabricator/ $ ./bin/storage upgrade --user <user> --password <password>
+ phorge/ $ ./bin/storage upgrade --user <user> --password <password>
You can avoid the prompt the script issues by passing the `--force` flag (for
example, if you are scripting the upgrade process).
- phabricator/ $ ./bin/storage upgrade --force
+ phorge/ $ ./bin/storage upgrade --force
-NOTE: When you update Phabricator, run `storage upgrade` again to apply any
+NOTE: When you update Phorge, run `storage upgrade` again to apply any
new updates.
= Next Steps =
Continue by:
- setting up your admin account and login/registration with
@{article:Configuring Accounts and Registration}; or
- understanding advanced configuration topics with
@{article:Configuration User Guide: Advanced Configuration}; or
- configuring an alternate file domain with
@{article:Configuring a File Domain}; or
- configuring a preamble script to set up the environment properly behind a
load balancer, or adjust rate limiting with
@{article:Configuring a Preamble Script}; or
- configuring where uploaded files and attachments will be stored with
@{article:Configuring File Storage}; or
- - configuring Phabricator so it can send mail with
+ - configuring Phorge so it can send mail with
@{article:Configuring Outbound Email}; or
- configuring inbound mail with @{article:Configuring Inbound Email}; or
- importing repositories with @{article:Diffusion User Guide}; or
- learning about daemons with @{article:Managing Daemons with phd}; or
- learning about notification with
@{article:Notifications User Guide: Setup and Configuration}; or
- configuring backups with
@{article:Configuring Backups and Performing Migrations}; or
- - contributing to Phabricator with @{article:Contributor Introduction}.
+ - contributing to Phorge with @{article:Contributor Introduction}.
diff --git a/src/docs/user/configuration/configuration_locked.diviner b/src/docs/user/configuration/configuration_locked.diviner
index 57ed76c5c7..19b06ea424 100644
--- a/src/docs/user/configuration/configuration_locked.diviner
+++ b/src/docs/user/configuration/configuration_locked.diviner
@@ -1,176 +1,176 @@
@title Configuration Guide: Locked and Hidden Configuration
@group config
Details about locked and hidden configuration.
Overview
========
Some configuration options are **Locked** or **Hidden**. If an option has one
of these attributes, it means:
- **Locked Configuration**: This setting can not be written from the web UI.
- **Hidden Configuration**: This setting can not be read or written from
the web UI.
This document explains these attributes in more detail.
Locked Configuration
====================
**Locked Configuration** can not be edited from the web UI. In general, you
can edit it from the CLI instead, with `bin/config`:
```
-phabricator/ $ ./bin/config set <key> <value>
+phorge/ $ ./bin/config set <key> <value>
```
Some configuration options take complicated values which can be difficult
to escape properly for the shell. The easiest way to set these options is
to use the `--stdin` flag. First, put your desired value in a `config.json`
file:
```name=config.json, lang=json
{
"duck": "quack",
"cow": "moo"
}
```
Then, set it with `--stdin` like this:
```
-phabricator/ $ ./bin/config set <key> --stdin < config.json
+phorge/ $ ./bin/config set <key> --stdin < config.json
```
A few settings have alternate CLI tools. Refer to the setting page for
details.
Note that these settings can not be written to the database, even from the
CLI.
Locked values can not be unlocked: they are locked because of what the setting
does or how the setting operates. Some of the reasons configuration options are
locked include:
**Required for bootstrapping**: Some options, like `mysql.host`, must be
-available before Phabricator can read configuration from the database.
+available before Phorge can read configuration from the database.
-If you stored `mysql.host` only in the database, Phabricator would not know how
+If you stored `mysql.host` only in the database, Phorge would not know how
to connect to the database in order to read the value in the first place.
These options must be provided in a configuration source which is read earlier
-in the bootstrapping process, before Phabricator connects to the database.
+in the bootstrapping process, before Phorge connects to the database.
**Errors could not be fixed from the web UI**: Some options, like
`phabricator.base-uri`, can effectively disable the web UI if they are
configured incorrectly.
If these options could be configured from the web UI, you could not fix them if
you made a mistake (because the web UI would no longer work, so you could not
load the page to change the value).
We require these options to be edited from the CLI to make sure the editor has
access to fix any mistakes.
**Attackers could gain greater access**: Some options could be modified by an
attacker who has gained access to an administrator account in order to gain
greater access.
For example, an attacker who could modify `cluster.mailers` (and other
-similar options), could potentially reconfigure Phabricator to send mail
+similar options), could potentially reconfigure Phorge to send mail
through an evil server they controlled, then trigger password resets on other
user accounts to compromise them.
We require these options to be edited from the CLI to make sure the editor
has full access to the install.
Hidden Configuration
====================
**Hidden Configuration** is similar to locked configuration, but also can not
be //read// from the web UI.
In almost all cases, configuration is hidden because it is some sort of secret
key or access token for an external service. These values are hidden from the
web UI to prevent administrators (or attackers who have compromised
administrator accounts) from reading them.
You can review (and edit) hidden configuration from the CLI:
```
-phabricator/ $ ./bin/config get <key>
-phabricator/ $ ./bin/config set <key> <value>
+phorge/ $ ./bin/config get <key>
+phorge/ $ ./bin/config set <key> <value>
```
Locked Configuration With Database Values
=========================================
You may receive a setup issue warning you that a locked configuration key has a
value set in the database. Most commonly, this is because:
- - In some earlier version of Phabricator, this configuration was not locked.
+ - In some earlier version of Phorge, this configuration was not locked.
- In the past, you or some other administrator used the web UI to set a
value. This value was written to the database.
- In a later version of the software, the value became locked.
-When Phabricator was originally released, locked configuration did not yet
+When Phorge was originally released, locked configuration did not yet
exist. Locked configuration was introduced later, and then configuration options
were gradually locked for a long time after that.
In some cases the meaning of a value changed and it became possible to use it
to break an install or the configuration became a security risk. In other
cases, we identified an existing security risk or arrived at some other reason
to lock the value.
Locking values was more common in the past, and it is now relatively rare for
an unlocked value to become locked: when new values are introduced, they are
generally locked or hidden appropriately. In most cases, this setup issue only
-affects installs that have used Phabricator for a long time.
+affects installs that have used Phorge for a long time.
-At time of writing (February 2019), Phabricator currently respects these old
-database values. However, some future version of Phabricator will refuse to
+At time of writing (February 2019), Phorge currently respects these old
+database values. However, some future version of Phorge will refuse to
read locked configuration from the database, because this improves security if
an attacker manages to find a way to bypass restrictions on editing locked
configuration from the web UI.
To clear this setup warning and avoid surprise behavioral changes in the future,
you should move these configuration values from the database to a local config
file. Usually, you'll do this by first copying the value from the database:
```
-phabricator/ $ ./bin/config get <key>
+phorge/ $ ./bin/config get <key>
```
...into local configuration:
```
-phabricator/ $ ./bin/config set <key> <value>
+phorge/ $ ./bin/config set <key> <value>
```
...and then removing the database value:
```
-phabricator/ $ ./bin/config delete --database <key>
+phorge/ $ ./bin/config delete --database <key>
```
See @{article:Configuration User Guide: Advanced Configuration} for some more
detailed
discussion of different configuration sources.
Next Steps
==========
Continue by:
- learning more about advanced options with
@{article:Configuration User Guide: Advanced Configuration}; or
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_accounts_and_registration.diviner b/src/docs/user/configuration/configuring_accounts_and_registration.diviner
index e703c46402..5d28b3342c 100644
--- a/src/docs/user/configuration/configuring_accounts_and_registration.diviner
+++ b/src/docs/user/configuration/configuring_accounts_and_registration.diviner
@@ -1,61 +1,61 @@
@title Configuring Accounts and Registration
@group config
-Describes how to configure user access to Phabricator.
+Describes how to configure user access to Phorge.
Overview
========
-Phabricator supports a number of login systems. You can enable or disable these
+Phorge supports a number of login systems. You can enable or disable these
systems to configure who can register for and access your install, and how users
with existing accounts can login.
Methods of logging in are called **Authentication Providers**. For example,
there is a "Username/Password" authentication provider available, which allows
users to log in with a traditional username and password. Other providers
support logging in with other credentials. For example:
- **LDAP:** Users use LDAP credentials to log in or register.
- **OAuth:** Users use accounts on a supported OAuth2 provider (like
GitHub, Facebook, or Google) to log in or register.
- - **Other Providers:** More providers are available, and Phabricator
+ - **Other Providers:** More providers are available, and Phorge
can be extended with custom providers. See the "Auth" application for
a list of available providers.
By default, no providers are enabled. You must use the "Auth" application to
add one or more providers after you complete the installation process.
After you add a provider, you can link it to existing accounts (for example,
-associate an existing Phabricator account with a GitHub OAuth account) or users
+associate an existing Phorge account with a GitHub OAuth account) or users
can use it to register new accounts (assuming you enable these options).
Recovering Inaccessible Accounts
================================
-If you accidentally lock yourself out of Phabricator (for example, by disabling
+If you accidentally lock yourself out of Phorge (for example, by disabling
all authentication providers), you can normally use the "send a login link"
action from the login screen to email yourself a login link and regain access
to your account.
If that isn't working (perhaps because you haven't configured email yet), you
can use the `bin/auth` script to recover access to an account. To recover
access, run:
```
-phabricator/ $ ./bin/auth recover <username>
+phorge/ $ ./bin/auth recover <username>
```
...where `<username>` is the account username you want to recover access
to. This will generate a link which will log you in as the specified user.
For more details on recovering access to accounts and unlocking objects, see
@{article:User Guide: Unlocking Objects}.
Next Steps
==========
Continue by:
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_backups.diviner b/src/docs/user/configuration/configuring_backups.diviner
index 0e851d01ac..e4e088aa8e 100644
--- a/src/docs/user/configuration/configuring_backups.diviner
+++ b/src/docs/user/configuration/configuring_backups.diviner
@@ -1,171 +1,171 @@
@title Configuring Backups and Performing Migrations
@group config
-Advice for backing up Phabricator, or migrating from one machine to another.
+Advice for backing up Phorge, or migrating from one machine to another.
Overview
========
-Phabricator does not currently have a comprehensive backup system, but creating
-backups is not particularly difficult and Phabricator does have a few basic
+Phorge does not currently have a comprehensive backup system, but creating
+backups is not particularly difficult and Phorge does have a few basic
tools which can help you set up a reasonable process. In particular, the things
which need to be backed up are:
- the MySQL databases;
- hosted repositories;
- uploaded files; and
- - your Phabricator configuration files.
+ - your Phorge configuration files.
This document discusses approaches for backing up this data.
If you are migrating from one machine to another, you can generally follow the
same steps you would if you were creating a backup and then restoring it: you
will just back up the old machine and then restore the data onto the new
machine.
-WARNING: You need to restart Phabricator after restoring data.
+WARNING: You need to restart Phorge after restoring data.
-Restarting Phabricator after performing a restore makes sure that caches are
+Restarting Phorge after performing a restore makes sure that caches are
flushed properly. For complete instructions, see
-@{article:Restarting Phabricator}.
+@{article:Restarting Phorge}.
Backup: MySQL Databases
=======================
-Most of Phabricator's data is stored in MySQL, and it's the most important thing
+Most of Phorge's data is stored in MySQL, and it's the most important thing
to back up. You can run `bin/storage dump` to get a dump of all the MySQL
databases. This is a convenience script which just runs a normal `mysqldump`,
-but will only dump databases Phabricator owns.
+but will only dump databases Phorge owns.
Since most of this data is compressible, it may be helpful to run it through
gzip prior to storage. For example:
- phabricator/ $ ./bin/storage dump --compress --output backup.sql.gz
+ phorge/ $ ./bin/storage dump --compress --output backup.sql.gz
Then store the backup somewhere safe, like in a box buried under an old tree
stump. No one will ever think to look for it there.
Restore: MySQL
==============
To restore a MySQL dump, just pipe it to `mysql` on a clean host. (You may need
to uncompress it first, if you compressed it prior to storage.)
$ gunzip -c backup.sql.gz | mysql
Backup: Hosted Repositories
===========================
-If you host repositories in Phabricator, you should back them up. You can use
+If you host repositories in Phorge, you should back them up. You can use
`bin/repository list-paths` to show the local paths on disk for each
repository. To back them up, copy them elsewhere.
You can also just clone them and keep the clones up to date, or use
{nav Add Mirror} to have them mirror somewhere automatically.
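For example, a minimal approach might look like this (the `/var/repo/` and
`/backups/repo/` paths below are placeholders; use the paths reported by
`list-paths` and your own backup location):

  phorge/ $ ./bin/repository list-paths
  phorge/ $ rsync -a /var/repo/ /backups/repo/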
Restore: Hosted Repositories
============================
To restore hosted repositories, copy them back into the correct locations
as shown by `bin/repository list-paths`.
Backup: Uploaded Files
======================
Uploaded files may be stored in several different locations. The backup
procedure depends on where files are stored:
**Default / MySQL**: Under the default configuration, uploaded files are stored
in MySQL, so the MySQL backup will include all files. In this case, you don't
need to do any additional work.
**Amazon S3**: If you use Amazon S3, redundancy and backups are built in to the
service. This is probably sufficient for most installs. If you trust Amazon with
your data //except not really//, you can backup your S3 bucket outside of
-Phabricator.
+Phorge.
**Local Disk**: If you use the local disk storage engine, you'll need to back up
files manually. You can do this by creating a copy of the root directory where
-you told Phabricator to put files (the `storage.local-disk.path` configuration
+you told Phorge to put files (the `storage.local-disk.path` configuration
setting).
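For example, a simple approach is to archive that directory (assuming a
hypothetical `storage.local-disk.path` of `/var/phorge/files`):

  $ tar -czf files-backup.tar.gz /var/phorge/files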
For more information about configuring how files are stored, see
@{article:Configuring File Storage}.
Restore: Uploaded Files
=======================
To restore a backup of local disk storage, just copy the backup into place.
Backup: Configuration Files
===========================
You should also back up your configuration files, and any scripts you use to
-deploy or administrate Phabricator (like a customized upgrade script). The best
+deploy or administrate Phorge (like a customized upgrade script). The best
way to do this is to check them into a private repository somewhere and just use
whatever backup process you already have in place for repositories. Just copying
them somewhere will work fine too, of course.
-In particular, you should backup this configuration file which Phabricator
+In particular, you should back up this configuration file which Phorge
creates:
- phabricator/conf/local/local.json
+ phorge/conf/local/local.json
This file contains all of the configuration settings that have been adjusted
by using `bin/config set <key> <value>`.
Restore: Configuration Files
============================
To restore configuration files, just copy them into the right locations. Copy
-your backup of `local.json` to `phabricator/conf/local/local.json`.
+your backup of `local.json` to `phorge/conf/local/local.json`.
Security
========
-MySQL dumps have no builtin encryption and most data in Phabricator is stored in
+MySQL dumps have no builtin encryption and most data in Phorge is stored in
a raw, accessible form, so giving a user access to backups is a lot like giving
-them shell access to the machine Phabricator runs on. In particular, a user who
+them shell access to the machine Phorge runs on. In particular, a user who
has the backups can:
- read data that policies do not permit them to see;
- read email addresses and object secret keys; and
- read other users' session and conduit tokens and impersonate them.
Some of this information is durable, so disclosure of even a very old backup may
-present a risk. If you restrict access to the Phabricator host or database, you
+present a risk. If you restrict access to the Phorge host or database, you
should also restrict access to the backups.
Skipping Indexes
================
By default, `bin/storage dump` does not dump all of the data in the database:
it skips some caches which can be rebuilt automatically and do not need to be
backed up. Some of these caches are very large, so the size of the dump may
be significantly smaller than the size of the databases.
If you have a large amount of data, you can specify `--no-indexes` when taking
a database dump to skip additional tables which contain search indexes. This
will reduce the size (and increase the speed) of the backup. This is an
advanced option which most installs will not benefit from.
This index data can be rebuilt after a restore, but will not be rebuilt
automatically. If you choose to use this flag, you must manually rebuild
indexes after a restore (for details, see ((reindex))).
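For example, a compressed dump which also skips search indexes might look like
this (most installs can simply omit `--no-indexes`):

  phorge/ $ ./bin/storage dump --compress --no-indexes --output backup.sql.gz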
Next Steps
==========
Continue by:
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_encryption.diviner b/src/docs/user/configuration/configuring_encryption.diviner
index 36315506e6..e237573432 100644
--- a/src/docs/user/configuration/configuring_encryption.diviner
+++ b/src/docs/user/configuration/configuring_encryption.diviner
@@ -1,196 +1,196 @@
@title Configuring Encryption
@group config
Setup guide for configuring encryption.
Overview
========
-Phabricator supports at-rest encryption of uploaded file data stored in the
+Phorge supports at-rest encryption of uploaded file data stored in the
"Files" application.
Configuring at-rest file data encryption does not encrypt any other data or
resources. In particular, it does not encrypt the database and does not encrypt
Passphrase credentials.
-Attackers who compromise a Phabricator host can read the master key and decrypt
+Attackers who compromise a Phorge host can read the master key and decrypt
the data. In most configurations, this does not represent a significant
barrier above and beyond accessing the file data. Thus, configuring at-rest
encryption is primarily useful for two types of installs:
- If you maintain your own webserver and database hardware but want to use
Amazon S3 or a similar cloud provider as a blind storage server, file data
encryption can let you do so without needing to trust the cloud provider.
- If you face a regulatory or compliance need to encrypt data at rest but do
not need to actually secure this data, encrypting the data and placing the
master key in plaintext next to it may satisfy compliance requirements.
The remainder of this document discusses how to configure at-rest encryption.
Quick Start
===========
To configure encryption, you will generally follow these steps:
- Generate a master key with `bin/files generate-key`.
 - Add the master key to the `keyring`, but don't mark it as `default` yet.
- Use `bin/files encode ...` to test encrypting a few files.
- Mark the key as `default` to automatically encrypt new files.
- Use `bin/files encode --all ...` to encrypt any existing files.
See the following sections for detailed guidance on these steps.
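As a rough sketch, the full sequence of commands might look like this (the
`keyring.json` file name and the `F123` file ID are placeholders for
illustration):

```
phorge/ $ ./bin/files generate-key --type aes-256-cbc     # Generate key material.
phorge/ $ ./bin/config set --stdin keyring < keyring.json # Install the keyring.
phorge/ $ ./bin/files encode --as aes-256-cbc F123        # Test-encrypt one file.
phorge/ $ ./bin/files encode --as aes-256-cbc --all       # After marking the key "default".
```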
Configuring a Keyring
=====================
To configure a keyring, set `keyring` with `bin/config` or by using another
configuration source. This option should be a list of keys in this format:
```lang=json
...
"keyring": [
{
"name": "master.key",
"type": "aes-256-cbc",
"material.base64": "UcHUJqq8MhZRwhvDV8sJwHj7bNJoM4tWfOIi..."
"default": true
},
...
]
...
```
Each key should have these properties:
- `name`: //Required string.// A unique key name.
- `type`: //Required string.// Type of the key. Only `aes-256-cbc` is
supported.
- `material.base64`: //Required string.// The key material. See below for
details.
- `default`: //Optional bool.// Optionally, mark exactly one key as the
default key to enable encryption of newly uploaded file data.
The key material is sensitive and an attacker who learns it can decrypt data
from the storage engine.
Format: Raw Data
================
The `raw` storage format is automatically selected for all newly uploaded
file data if no key is marked as the `default` key in the keyring. This is
-the behavior of Phabricator if you haven't configured anything.
+the behavior of Phorge if you haven't configured anything.
This format stores raw data without modification.
Format: AES256
==============
The `aes-256-cbc` storage format is automatically selected for all newly
uploaded file data if an AES256 key is marked as the `default` key in the
keyring.
This format uses AES256 in CBC mode. Each block of file data is encrypted with
a unique, randomly generated private key. That key is then encrypted with the
master key. Among other motivations, this strategy allows the master key to be
cycled relatively cheaply later (see "Cycling Master Keys" below).
AES256 keys should be randomly generated and 256 bits (32 bytes) in
length, then base64 encoded when represented in `keyring`.
You can generate a valid, properly encoded AES256 master key with this command:
```
-phabricator/ $ ./bin/files generate-key --type aes-256-cbc
+phorge/ $ ./bin/files generate-key --type aes-256-cbc
```
This mode is generally similar to the default server-side encryption mode
supported by Amazon S3.
Format: ROT13
=============
The `rot13` format is a test format that is never selected by default. You can
select this format explicitly with `bin/files encode` to test storage and
encryption behavior.
This format applies ROT13 encoding to file data.
Changing File Storage Formats
=============================
To test configuration, you can explicitly change the storage format of a file.
This will read the file data, decrypt it if necessary, write a new copy of the
data with the desired encryption, then update the file to point at the new
data. You can use this to make sure encryption works before turning it on by
default.
To change the format of an individual file, run this command:
```
-phabricator/ $ ./bin/files encode --as <format> F123 [--key <key>]
+phorge/ $ ./bin/files encode --as <format> F123 [--key <key>]
```
This will change the storage format of the specified file.
Verifying Storage Formats
=========================
You can review the storage format of a file from the web UI, in the
{nav Storage} tab under "Format". You can also use the "Engine" and "Handle"
properties to identify where the underlying data is stored and verify that
it is encrypted or encoded in the way you expect.
See @{article:Configuring File Storage} for more information on storage
engines.
Cycling Master Keys
===================
If you need to cycle your master key, some storage formats support key cycling.
Cycling a file's encryption key decodes the local key for the data using the
old master key, then re-encodes it using the new master key. This is primarily
useful if you believe your master key may have been compromised.
First, add a new key to the keyring and mark it as the default key. You need
to leave the old key in place for now so existing data can be decrypted.
To cycle an individual file, run this command:
```
-phabricator/ $ ./bin/files cycle F123
+phorge/ $ ./bin/files cycle F123
```
Verify that cycling worked properly by examining the command output and
accessing the file to check that the data is present and decryptable. You
can cycle additional files to gain additional confidence.
You can cycle all files with this command:
```
-phabricator/ $ ./bin/files cycle --all
+phorge/ $ ./bin/files cycle --all
```
Once all files have been cycled, remove the old master key from the keyring.
Not all storage formats support key cycling: cycling a file only has an effect
if the storage format is an encrypted format. For example, cycling a file that
uses the `raw` storage format has no effect.
Next Steps
==========
Continue by:
- understanding storage engines with @{article:Configuring File Storage}; or
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_file_domain.diviner b/src/docs/user/configuration/configuring_file_domain.diviner
index 6f7c410435..a54e850eb1 100644
--- a/src/docs/user/configuration/configuring_file_domain.diviner
+++ b/src/docs/user/configuration/configuring_file_domain.diviner
@@ -1,119 +1,119 @@
@title Configuring a File Domain
@group config
Setup guide for an alternate file domain or CDN.
Overview
========
-Serving files that users upload from the same domain that Phabricator runs on
+Serving files that users upload from the same domain that Phorge runs on
is a security risk.
In general, doing this creates a risk that users who have permission to upload
files may be able to upload specially crafted files (like Flash or Java
applets) which can execute with domain permissions in some contexts (usually
because of security issues with Flash and Java, but both products have a rich
history of security issues). The attacker can then trick another user into
executing the file and gain access to their session.
The best way to mitigate this threat is to serve files from a separate domain.
-For example, if Phabricator is hosted at `https://phabricator.example.com/`,
+For example, if Phorge is hosted at `https://phorge.example.com/`,
you can serve files from `https://files.exampleusercontent.com/`.
The alternate file domain should be a completely different domain from your
primary domain, not just a different subdomain. For example, Google uses
`googleusercontent.com`, //not// `usercontent.google.com`.
You can also configure the alternate file domain to serve through a CDN, which
will improve performance.
Approaches
=========
Broadly, you can either choose a CDN service and configure that (which will
also defuse the security risks) or you can configure a second domain with the
same settings as your first domain. A CDN service may be easier to set up and
can improve performance.
| Method | Setup Difficulty | Cost | Notes |
|---|---|---|---|
| AWS CloudFront | Very Easy | Cheap | Recommended |
| CloudFlare | Easy | Free/Cheap | Recommended |
| Self Hosted | Moderate | Free | No CDN unless you're an ops wizard. |
Approach: AWS CloudFront
========
CloudFront is a CDN service that's part of Amazon Web Services. It makes
particular sense to use if you're hosting your install in AWS.
To configure it, set up a new CloudFront distribution which is pointed at
-your Phabricator install as an origin (make sure you point it at the primary
+your Phorge install as an origin (make sure you point it at the primary
domain name of your install, not just a load balancer or instance). You do not
need to set up a new domain name, which makes setup a bit more straightforward.
Most settings can be left at their default values, but you should change
the **Allowed HTTP Methods** setting from `GET, HEAD` to
`GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE`.
Once configured, accessing the distribution's domain name should return a
-Phabricator error page indicating that Phabricator does not recognize the
+Phorge error page indicating that Phorge does not recognize the
domain. If you see this page, it means you've configured things correctly.
-Continue to "Configuring Phabricator", below.
+Continue to "Configuring Phorge", below.
Approach: CloudFlare
========
WARNING: You should review all your CloudFlare settings, and be very
sure to turn off all JavaScript, HTML, CSS minification and
optimization features, including systems like "Rocket Loader". These
-features will break Phabricator in strange and mysterious ways that
+features will break Phorge in strange and mysterious ways that
are unpredictable. Only allow CloudFlare to cache files, and never
optimize them.
[[ https://cloudflare.com | CloudFlare ]] is a general-purpose CDN service.
To set up CloudFlare, you'll need to register a second domain and go through
their enrollment process to host the alternate domain on their servers. Use a
-CNAME record to forward a subdomain to your Phabricator install.
+CNAME record to forward a subdomain to your Phorge install.
CloudFlare will automatically generate SSL certificates for hosted domains,
which can significantly reduce the cost and complexity of setup.
Once configured, accessing the CNAME-forwarded subdomain should return a
-Phabricator error page indicating that Phabricator does not recognize the
+Phorge error page indicating that Phorge does not recognize the
domain. If you see this page, it means you've configured things correctly.
-Continue to "Configuring Phabricator", below.
+Continue to "Configuring Phorge", below.
Approach: Self Hosted
========
To do this, just set up a second domain exactly like your primary domain is
-set up. When setup is complete, visiting the domain should return a Phabricator
-error page indicating that Phabricator does not recognize the domain. This
+set up. When setup is complete, visiting the domain should return a Phorge
+error page indicating that Phorge does not recognize the domain. This
means that you've configured things correctly.
Note that if you use SSL (which you should), you'll also need to get a
certificate for this alternate domain and configure that, too.
You can also configure a self-hosted domain to route through a caching server
to provide some of the performance benefits of a CDN, but this is advanced and
outside the scope of this documentation.
-Continue to "Configuring Phabricator", below.
+Continue to "Configuring Phorge", below.
-Configuring Phabricator
+Configuring Phorge
========
-After you've set up a CDN or an alternate domain, configure Phabricator to
+After you've set up a CDN or an alternate domain, configure Phorge to
recognize the domain. Run this command, providing the domain you have
configured in place of the `<domain>` token. You should include the protocol,
-so an example domain might be `https://cdn.phabcdn.net/`.
+so an example domain might be `https://cdn.examplecdn.com/`.
- phabricator/ $ ./bin/config set security.alternate-file-domain <domain>
+ phorge/ $ ./bin/config set security.alternate-file-domain <domain>
-Phabricator should now serve CSS, JS, images, profile pictures, and user
+Phorge should now serve CSS, JS, images, profile pictures, and user
content through the file domain. You can verify this with "View Source" or
by downloading a file and checking the URL.
diff --git a/src/docs/user/configuration/configuring_file_storage.diviner b/src/docs/user/configuration/configuring_file_storage.diviner
index d6abb22c13..6d7fb247b9 100644
--- a/src/docs/user/configuration/configuring_file_storage.diviner
+++ b/src/docs/user/configuration/configuring_file_storage.diviner
@@ -1,202 +1,202 @@
@title Configuring File Storage
@group config
Setup file storage and support for large files.
Overview
========
-This document describes how to configure Phabricator to support large file
-uploads, and how to choose where Phabricator stores files.
+This document describes how to configure Phorge to support large file
+uploads, and how to choose where Phorge stores files.
There are two major things to configure:
- set up PHP and your HTTP server to accept large requests;
- choose and configure a storage engine.
The following sections will guide you through this configuration.
-How Phabricator Stores Files
+How Phorge Stores Files
============================
-Phabricator stores files in "storage engines", which are modular backends
+Phorge stores files in "storage engines", which are modular backends
that implement access to some storage system (like MySQL, the filesystem, or
a cloud storage service like Amazon S3).
-Phabricator stores large files by breaking them up into many chunks (a few
+Phorge stores large files by breaking them up into many chunks (a few
megabytes in size) and storing the chunks in an underlying storage engine.
-This makes it easier to implement new storage engines and gives Phabricator
+This makes it easier to implement new storage engines and gives Phorge
more flexibility in managing file data.
The first section of this document discusses configuring your install so that
PHP and your HTTP server will accept requests which are larger than the size of
one file chunk. Without this configuration, file chunk data will be rejected.
The second section discusses choosing and configuring storage engines, so data
is stored where you want it to be.
Configuring Upload Limits
=========================
File uploads are limited by several pieces of configuration at different layers
of the stack. Generally, the minimum value of all the limits is the effective
one.
To upload large files, you need to increase all the limits to at least
-**32MB**. This will allow you to upload file chunks, which will let Phabricator
+**32MB**. This will allow you to upload file chunks, which will let Phorge
store arbitrarily large files.
The settings which limit file uploads are:
**HTTP Server**: The HTTP server may set a limit on the maximum request size.
If you exceed this limit, you'll see a default server page with an HTTP error.
These directives limit the total size of the request body, so they must be
somewhat larger than the desired maximum filesize.
- **Apache**: Apache limits requests with the Apache `LimitRequestBody`
directive.
- **nginx**: nginx limits requests with the nginx `client_max_body_size`
directive. This often defaults to `1M`.
- **lighttpd**: lighttpd limits requests with the lighttpd
`server.max-request-size` directive.
-Set the applicable limit to at least **32MB**. Phabricator can not read these
+Set the applicable limit to at least **32MB**. Phorge can not read these
settings, so it can not raise setup warnings if they are misconfigured.
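For example, with nginx this might be a single directive inside the relevant
`server { }` block (shown here with the recommended minimum):

  client_max_body_size 32M;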
**PHP**: PHP has several directives which limit uploads. These directives are
found in `php.ini`.
- **post_max_size**: Maximum POST request size PHP will accept. If you
- exceed this, Phabricator will give you a useful error. This often defaults
- to `8M`. Set this to at least `32MB`. Phabricator will give you a setup
+ exceed this, Phorge will give you a useful error. This often defaults
+ to `8M`. Set this to at least `32MB`. Phorge will give you a setup
warning about this if it is set too low.
- **memory_limit**: For some uploads, file data will be read into memory
- before Phabricator can adjust the memory limit. If you exceed this, PHP
+ before Phorge can adjust the memory limit. If you exceed this, PHP
may give you a useful error, depending on your configuration. It is
- recommended that you set this to `-1` to disable it. Phabricator will
+ recommended that you set this to `-1` to disable it. Phorge will
give you a setup warning about this if it is set too low.
You may also want to configure these PHP options:
- **max_input_vars**: When files are uploaded via HTML5 drag and drop file
upload APIs, PHP parses the file body as though it contained normal POST
parameters, and may trigger `max_input_vars` if a file has a lot of
brackets in it. You may need to set it to some astronomically high value.
- **upload_max_filesize**: Maximum file size PHP will accept in a raw file
upload. This is not normally used when uploading files via drag-and-drop,
but affects some other kinds of file uploads. If you exceed this,
- Phabricator will give you a useful error. This often defaults to `2M`. Set
+ Phorge will give you a useful error. This often defaults to `2M`. Set
this to at least `32MB`.
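For example, the corresponding `php.ini` entries might look like this (values
follow the recommendations above; adjust them to suit your install):

  post_max_size = 32M
  memory_limit = -1
  upload_max_filesize = 32M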
Once you've adjusted all this configuration, your server will be able to
receive chunk uploads. As long as you have somewhere to store them, this will
enable you to store arbitrarily large files.
Storage Engines
===============
-Phabricator supports several different file storage engines:
+Phorge supports several different file storage engines:
| Engine | Setup | Cost | Notes |
|--------|-------|------|-------|
| MySQL | Automatic | Free | May not scale well. |
| Local Disk | Easy | Free | Does not scale well. |
| Amazon S3 | Easy | Cheap | Scales well. |
| Custom | Hard | Varies | Implement a custom storage engine. |
You can review available storage engines and their configuration by navigating
to {nav Applications > Files > Help/Options > Storage Engines} in the web UI.
-By default, Phabricator is configured to store files up to 1MB in MySQL, and
+By default, Phorge is configured to store files up to 1MB in MySQL, and
reject files larger than 1MB. To store larger files, you can either:
- increase the MySQL limit to at least 8MB; or
- configure another storage engine.
Doing either of these will enable the chunk storage engine and support for
arbitrarily large files.
The remaining sections of this document discuss the available storage engines
and how to configure them.
Engine: MySQL
=============
- **Pros**: Low latency, no setup required.
- **Cons**: Storing files in a database is a classic bad idea. May become
difficult to administrate if you have a large amount of data.
MySQL storage is configured by default, for files up to (just under) 1MB. You
can configure it with these keys:
- `storage.mysql-engine.max-size`: Change the filesize limit, in bytes. Set
to 0 to disable.
For most installs, it is reasonable to leave this engine as-is and let small
files (like thumbnails and profile images) be stored in MySQL, which is usually
the lowest-latency filestore, even if you configure another storage engine.
To support large files, increase this limit to at least `8388608` (8MB).
This will activate chunk storage in MySQL.
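For example, to raise the limit to 8MB and activate chunk storage:

  phorge/ $ ./bin/config set storage.mysql-engine.max-size 8388608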
Engine: Local Disk
==================
- **Pros**: Simple to setup.
- **Cons**: Doesn't scale to multiple web frontends without NFS.
To configure file storage on the local disk, set:
- `storage.local-disk.path`: Set to some writable directory on local disk.
Make that directory.
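For example (the `/var/phorge/files` path and `www-data` user below are
placeholders; use a path and webserver user appropriate for your environment):

  $ sudo mkdir -p /var/phorge/files
  $ sudo chown www-data /var/phorge/files
  phorge/ $ ./bin/config set storage.local-disk.path /var/phorge/files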
Engine: Amazon S3
=================
- **Pros**: Scales well.
- **Cons**: Slightly more complicated than other engines, not free.
To enable file storage in S3, set these keys:
- `amazon-s3.access-key`: Your AWS access key.
- `amazon-s3.secret-key`: Your AWS secret key.
- `amazon-s3.region`: Your AWS S3 region.
- `amazon-s3.endpoint`: Your AWS S3 endpoint.
- `storage.s3.bucket`: S3 bucket name where files should be stored.
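For example, you might set these keys with `bin/config` (the region, endpoint,
and bucket values below are illustrative; substitute your own):

  phorge/ $ ./bin/config set amazon-s3.access-key <access-key>
  phorge/ $ ./bin/config set amazon-s3.secret-key <secret-key>
  phorge/ $ ./bin/config set amazon-s3.region us-west-2
  phorge/ $ ./bin/config set amazon-s3.endpoint s3.us-west-2.amazonaws.com
  phorge/ $ ./bin/config set storage.s3.bucket my-file-bucket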
Testing Storage Engines
=======================
You can test that things are correctly configured by dragging and dropping
-a file onto the Phabricator home page. If engines have been configured
+a file onto the Phorge home page. If engines have been configured
properly, the file should upload.
Migrating Files Between Engines
===============================
If you want to move files between storage engines, you can use the `bin/files`
script to perform migrations. For example, suppose you previously used MySQL but
recently set up S3 and want to migrate all your files there. First, migrate one
file to make sure things work:
- phabricator/ $ ./bin/files migrate --engine amazon-s3 F12345
+ phorge/ $ ./bin/files migrate --engine amazon-s3 F12345
If that works properly, you can then migrate everything:
- phabricator/ $ ./bin/files migrate --engine amazon-s3 --all
+ phorge/ $ ./bin/files migrate --engine amazon-s3 --all
You can use `--dry-run` to show which migrations would be performed without
taking any action. Run `bin/files help` for more options and information.
Next Steps
==========
Continue by:
- reviewing at-rest encryption options with
@{article:Configuring Encryption}; or
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_inbound_email.diviner b/src/docs/user/configuration/configuring_inbound_email.diviner
index b1ad08b7dd..00861cc93e 100644
--- a/src/docs/user/configuration/configuring_inbound_email.diviner
+++ b/src/docs/user/configuration/configuring_inbound_email.diviner
@@ -1,291 +1,291 @@
@title Configuring Inbound Email
@group config
This document contains instructions for configuring inbound email, so users
-may interact with some Phabricator applications via email.
+may interact with some Phorge applications via email.
Preamble
========
-Phabricator can process inbound mail in two general ways:
+Phorge can process inbound mail in two general ways:
**Handling Replies**: When users reply to email notifications about changes,
-Phabricator can turn email into comments on the relevant discussion thread.
+Phorge can turn email into comments on the relevant discussion thread.
**Creating Objects**: You can configure an address like `bugs@yourcompany.com`
to create new objects (like tasks) when users send email.
In either case, users can interact with objects via mail commands to apply a
broader set of changes to objects beyond commenting. (For example, you can use
`!close` to close a task or `!priority` to change task priority.)
To configure inbound mail, you will generally:
- - Configure some mail domain to submit mail to Phabricator for processing.
+ - Configure some mail domain to submit mail to Phorge for processing.
- For handling replies, set `metamta.reply-handler-domain` in your
configuration.
- For handling email that creates objects, configure inbound addresses in the
relevant application.
See below for details on each of these steps.
Configuration Overview
======================
Usually, the most challenging part of configuring inbound mail is getting mail
-delivered to Phabricator for processing. This step can be made much easier if
-you use a third-party mail service which can submit mail to Phabricator via
+delivered to Phorge for processing. This step can be made much easier if
+you use a third-party mail service which can submit mail to Phorge via
webhooks.
-Some available approaches for delivering mail to Phabricator are:
+Some available approaches for delivering mail to Phorge are:
| Receive Mail With | Setup | Cost | Notes |
|--------|-------|------|-------|
| Mailgun | Easy | Cheap | Recommended |
| Postmark | Easy | Cheap | Recommended |
| SendGrid | Easy | Cheap | |
| Local MTA | Difficult | Free | Discouraged |
-The remainder of this document walks through configuring Phabricator to
+The remainder of this document walks through configuring Phorge to
receive mail, and then configuring your chosen transport to deliver mail
-to Phabricator.
+to Phorge.
Configuring "Reply" Email
=========================
-By default, Phabricator uses a `noreply@phabricator.example.com` email address
+By default, Phorge uses a `noreply@phorge.example.com` email address
as the "From" address when it sends mail. The exact address it uses can be
configured with `metamta.default-address`.
-When a user takes an action that generates mail, Phabricator sets the
+When a user takes an action that generates mail, Phorge sets the
"Reply-To" addresss for the mail to that user's name and address. This means
that users can reply to email to discuss changes, but: the conversation won't
-be recorded in Phabricator; and users will not be able to use email commands
+be recorded in Phorge; and users will not be able to use email commands
to take actions or make edits.
-To change this behavior so that users can interact with objects in Phabricator
+To change this behavior so that users can interact with objects in Phorge
over email, change the configuration key `metamta.reply-handler-domain` to some
domain you configure according to the instructions below, e.g.
-`phabricator.example.com`. Once you set this key, email will use a
-"Reply-To" like `T123+273+af310f9220ad@phabricator.example.com`, which -- when
+`phorge.example.com`. Once you set this key, email will use a
+"Reply-To" like `T123+273+af310f9220ad@phorge.example.com`, which -- when
configured correctly, according to the instructions below -- will parse incoming
email and allow users to interact with Differential revisions, Maniphest tasks,
etc. over email.
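For example, using the example domain above:

  phorge/ $ ./bin/config set metamta.reply-handler-domain phorge.example.com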
-If you don't want Phabricator to take up an entire domain (or subdomain) you
+If you don't want Phorge to take up an entire domain (or subdomain) you
can configure a general prefix so you can use a single mailbox to receive mail
on. To make use of this, set `metamta.single-reply-handler-prefix` to the
-prefix of your choice, and Phabricator will prepend this to the "Reply-To"
+prefix of your choice, and Phorge will prepend this to the "Reply-To"
mail address. This works because everything up to the first (optional) '+'
character in an email address is considered the receiver, and everything
after is essentially ignored.
Configuring "Create" Email
==========================
You can set up application email addresses to allow users to create objects via
-email. For example, you could configure `bugs@phabricator.example.com` to
+email. For example, you could configure `bugs@phorge.example.com` to
create a Maniphest task out of any email which is sent to it.
You can find application email settings for each application at:
{nav icon=home, name=Home >
Applications >
type=instructions, name="Select an Application" >
icon=cog, name=Configure}
Not all applications support creating objects via email.
In some applications, including Maniphest, you can also configure Herald rules
with the `[ Content source ]` and/or `[ Receiving email address ]` fields to
route or handle objects based on which address mail was sent to.
You'll also need to configure the actual mail domain to submit mail to
-Phabricator by following the instructions below. Phabricator will let you add
+Phorge by following the instructions below. Phorge will let you add
any address as an application address, but can only process mail which is
actually delivered to it.
Security
========
The email reply channel is "somewhat" authenticated. Each reply-to address is
unique to the recipient and includes a hash of user information and a unique
object ID, so it can only be used to update that object and only be used to act
on behalf of the recipient.
However, if an address is leaked (which is fairly easy -- for instance,
forwarding an email will leak a live reply address, or a user might take a
screenshot), //anyone// who can send mail to your reply-to domain may interact
with the object the email relates to as the user who leaked the mail. Because
the authentication around email has this weakness, some actions (like accepting
revisions) are not permitted over email.
This implementation is an attempt to balance utility and security, but makes
some sacrifices on both sides to achieve it because of the difficulty of
authenticating senders in the general case (e.g., where you are an open source
project and need to interact with users whose email accounts you have no control
over).
-You can also set `metamta.public-replies`, which will change how Phabricator
+You can also set `metamta.public-replies`, which will change how Phorge
delivers email. Instead of sending each recipient a unique mail with a personal
reply-to address, it will send a single email to everyone with a public reply-to
address. This decreases security because anyone who can spoof a "From" address
can act as another user, but increases convenience if you use mailing lists and,
practically, is a reasonable setting for many installs. The reply-to address
will still contain a hash unique to the object it represents, so users who have
not received an email about an object can not blindly interact with it.
If you enable application email addresses, those addresses also use the weaker
"From" authentication mechanism.
-NOTE: Phabricator does not currently attempt to verify "From" addresses because
+NOTE: Phorge does not currently attempt to verify "From" addresses because
this is technically complex, seems unreasonably difficult in the general case,
and no installs have had a need for it yet. If you have a specific case where a
reasonable mechanism exists to provide sender verification (e.g., DKIM
signatures are sufficient to authenticate the sender under your configuration,
or you are willing to require all users to sign their email), file a feature
request.
Testing and Debugging Inbound Email
===================================
You can use the `bin/mail` utility to test and review inbound mail. This can
-help you determine if mail is being delivered to Phabricator or not:
+help you determine if mail is being delivered to Phorge or not:
- phabricator/ $ ./bin/mail list-inbound # List inbound messages.
- phabricator/ $ ./bin/mail show-inbound # Show details about a message.
+ phorge/ $ ./bin/mail list-inbound # List inbound messages.
+ phorge/ $ ./bin/mail show-inbound # Show details about a message.
You can also test receiving mail, but note that this just simulates receiving
the mail and doesn't send any information over the network. It is
primarily aimed at developing email handlers: it will still work properly
if your inbound email configuration is incorrect or even disabled.
- phabricator/ $ ./bin/mail receive-test # Receive test message.
+ phorge/ $ ./bin/mail receive-test # Receive test message.
Run `bin/mail help <command>` for detailed help on using these commands.
Mailgun Setup
=============
To use Mailgun, you need a Mailgun account. You can sign up at
<http://www.mailgun.com>. Provided you have such an account, configure it
like this:
- Configure a mail domain according to Mailgun's instructions.
- Add a Mailgun route with a `catch_all()` rule which takes the action
- `forward("https://phabricator.example.com/mail/mailgun/")`. Replace the
+ `forward("https://phorge.example.com/mail/mailgun/")`. Replace the
example domain with your actual domain.
- Configure a mailer in `cluster.mailers` with your Mailgun API key.
Postmark Setup
==============
To process inbound mail from Postmark, configure this URI as your inbound
webhook URI in the Postmark control panel:
```
-https://<phabricator.yourdomain.com>/mail/postmark/
+https://<phorge.yourdomain.com>/mail/postmark/
```
See also the Postmark section in @{article:Configuring Outbound Email} for
discussion of the remote address whitelist used to verify that requests this
endpoint receives are authentic requests originating from Postmark.
SendGrid Setup
==============
To use SendGrid, you need a SendGrid account with access to the "Parse API" for
inbound email. Provided you have such an account, configure it like this:
- Configure an MX record according to SendGrid's instructions, i.e. add
- `phabricator.example.com MX 10 mx.sendgrid.net.` or similar.
+ `phorge.example.com MX 10 mx.sendgrid.net.` or similar.
- Go to the "Parse Incoming Emails" page on SendGrid
(<http://sendgrid.com/developer/reply>) and add the domain as the
"Hostname".
- - Add the URL `https://phabricator.example.com/mail/sendgrid/` as the "Url",
+ - Add the URL `https://phorge.example.com/mail/sendgrid/` as the "Url",
using your domain (and HTTP instead of HTTPS if you are not configured with
SSL).
- If you get an error that the hostname "can't be located or verified", it
means your MX record is either incorrectly configured or hasn't propagated
yet.
- - Set `metamta.reply-handler-domain` to `phabricator.example.com`
+ - Set `metamta.reply-handler-domain` to `phorge.example.com`
(whatever you configured the MX record for).
That's it! If everything is working properly you should be able to send email
-to `anything@phabricator.example.com` and it should appear in
+to `anything@phorge.example.com` and it should appear in
`bin/mail list-inbound` within a few seconds.
Local MTA: Installing Mailparse
===============================
If you're going to run your own MTA, you need to install the PECL mailparse
extension. In theory, you can do that with:
$ sudo pecl install mailparse
You may run into an error like "needs mbstring". If so, try:
$ sudo yum install php-mbstring # or equivalent
$ sudo pecl install -n mailparse
If you get a linker error like this:
COUNTEREXAMPLE
PHP Warning: PHP Startup: Unable to load dynamic library
'/usr/lib64/php/modules/mailparse.so' - /usr/lib64/php/modules/mailparse.so:
undefined symbol: mbfl_name2no_encoding in Unknown on line 0
...you need to edit your php.ini file so that mbstring.so is loaded **before**
mailparse.so. This is not the default if you have individual files in
`php.d/`.
Local MTA: Configuring Sendmail
===============================
Before you can configure Sendmail, you need to install Mailparse. See the
section "Installing Mailparse" above.
Sendmail is very difficult to configure. First, you need to configure it for
your domain so that mail can be delivered correctly. In broad strokes, this
probably means something like this:
- add an MX record;
- make sendmail listen on external interfaces;
- open up port 25 if necessary (e.g., in your EC2 security policy);
- add your host to /etc/mail/local-host-names; and
- restart sendmail.
-Now, you can actually configure sendmail to deliver to Phabricator. In
+Now, you can actually configure sendmail to deliver to Phorge. In
`/etc/aliases`, add an entry like this:
- phabricator: "| /path/to/phabricator/scripts/mail/mail_handler.php"
+ phorge: "| /path/to/phorge/scripts/mail/mail_handler.php"
If you use the `PHABRICATOR_ENV` environmental variable to select a
configuration, you can pass the value to the script as an argument:
.../path/to/mail_handler.php <ENV>
This is an advanced feature which is rarely used. Most installs should run
without an argument.
After making this change, run `sudo newaliases`. Now you likely need to symlink
this script into `/etc/smrsh/`:
- sudo ln -s /path/to/phabricator/scripts/mail/mail_handler.php /etc/smrsh/
+ sudo ln -s /path/to/phorge/scripts/mail/mail_handler.php /etc/smrsh/
Finally, edit `/etc/mail/virtusertable` and add an entry like this:
- @yourdomain.com phabricator@localhost
+ @yourdomain.com phorge@localhost
-That will forward all mail to @yourdomain.com to the Phabricator processing
+That will forward all mail to @yourdomain.com to the Phorge processing
script. Run `sudo /etc/mail/make` or similar and then restart sendmail with
`sudo /etc/init.d/sendmail restart`.
diff --git a/src/docs/user/configuration/configuring_outbound_email.diviner b/src/docs/user/configuration/configuring_outbound_email.diviner
index 736d4f625c..327bf974ac 100644
--- a/src/docs/user/configuration/configuring_outbound_email.diviner
+++ b/src/docs/user/configuration/configuring_outbound_email.diviner
@@ -1,512 +1,512 @@
@title Configuring Outbound Email
@group config
-Instructions for configuring Phabricator to send email and other types of
+Instructions for configuring Phorge to send email and other types of
messages, like text messages.
Overview
========
-Phabricator sends outbound messages through "mailers". Most mailers send
+Phorge sends outbound messages through "mailers". Most mailers send
email and most messages are email messages, but mailers may also send other
types of messages (like text messages).
-Phabricator can send outbound messages through multiple different mailers,
+Phorge can send outbound messages through multiple different mailers,
including a local mailer or various third-party services. Options include:
| Send Mail With | Setup | Cost | Inbound | Media | Notes |
|----------------|-------|------|---------|-------|-------|
| Postmark | Easy | Cheap | Yes | Email | Recommended |
| Mailgun | Easy | Cheap | Yes | Email | Recommended |
| Amazon SES | Easy | Cheap | No | Email | |
| SendGrid | Medium | Cheap | Yes | Email | |
| Twilio | Easy | Cheap | No | SMS | Recommended |
| Amazon SNS | Easy | Cheap | No | SMS | Recommended |
| External SMTP | Medium | Varies | No | Email | Gmail, etc. |
| Local SMTP | Hard | Free | No | Email | sendmail, postfix, etc |
| Custom | Hard | Free | No | All | Write a custom mailer. |
| Drop in a Hole | Easy | Free | No | All | Drops mail in a deep, dark hole. |
See below for details on how to select and configure mail delivery for each
mailer.
For email, Postmark or Mailgun are recommended because they make it easy to
set up inbound and outbound mail and have good track records in our production
services. Other services will also generally work well, but they may be more
difficult to set up.
For SMS, Twilio or SNS are recommended. They're also your only upstream
options.
If you have some internal mail or messaging service you'd like to use you can
also write a custom mailer, but this requires digging into the code.
-Phabricator sends mail in the background, so the daemons need to be running for
+Phorge sends mail in the background, so the daemons need to be running for
it to be able to deliver mail. You should receive setup warnings if they are
not. For more information on using daemons, see
@{article:Managing Daemons with phd}.
Outbound "From" and "To" Addresses
==================================
-When Phabricator sends outbound mail, it must select some "From" address to
+When Phorge sends outbound mail, it must select some "From" address to
send mail from, since mailers require this.
-When mail only has "CC" recipients, Phabricator generates a dummy "To" address,
+When mail only has "CC" recipients, Phorge generates a dummy "To" address,
since some mailers require this and some users write mail rules that depend
on whether they appear in the "To" or "CC" line.
In both cases, the address should ideally correspond to a valid, deliverable
mailbox that accepts the mail and then simply discards it. If the address is
not valid, some outbound mail will bounce, and users will receive bounces when
they "Reply All" even if the other recipients for the message are valid. In
contrast, if the address is a real user address, that user will receive a lot
of mail they probably don't want.
If you plan to configure //inbound// mail later, you usually don't need to do
-anything. Phabricator will automatically create a `noreply@` mailbox which
+anything. Phorge will automatically create a `noreply@` mailbox which
works the right way (accepts and discards all mail it receives) and
automatically use it when generating addresses.
If you don't plan to configure inbound mail, you may need to configure an
-address for Phabricator to use. You can do this by setting
+address for Phorge to use. You can do this by setting
`metamta.default-address`.
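For example, using an illustrative `noreply@` address (substitute a
deliverable address on your own domain):

```
phorge/ $ ./bin/config set metamta.default-address noreply@phorge.example.com
```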
Configuring Mailers
===================
Configure one or more mailers by listing them in the `cluster.mailers`
configuration option. Most installs only need to configure one mailer, but you
can configure multiple mailers to provide greater availability in the event of
a service disruption.
A valid `cluster.mailers` configuration looks something like this:
```lang=json
[
{
"key": "mycompany-mailgun",
"type": "mailgun",
"options": {
"domain": "mycompany.com",
"api-key": "..."
}
},
...
]
```
The supported keys for each mailer are:
- `key`: Required string. A unique name for this mailer.
- `type`: Required string. Identifies the type of mailer. See below for
options.
- `priority`: Optional string. Advanced option which controls load balancing
and failover behavior. See below for details.
- `options`: Optional map. Additional options for the mailer type.
- `inbound`: Optional bool. Use `false` to prevent this mailer from being
used to receive inbound mail.
- `outbound`: Optional bool. Use `false` to prevent this mailer from being
used to send outbound mail.
- `media`: Optional list<string>. Some mailers support delivering multiple
types of messages (like Email and SMS). If you want to configure a mailer
to support only a subset of possible message types, list only those message
types. Normally, you do not need to configure this. See below for a list
of media types.
The `type` field can be used to select these mailer services:
- `mailgun`: Use Mailgun.
- `ses`: Use Amazon SES.
- `sendgrid`: Use SendGrid.
- `postmark`: Use Postmark.
- `twilio`: Use Twilio.
- `sns`: Use Amazon SNS.
It also supports these local mailers:
- `sendmail`: Use the local `sendmail` binary.
- `smtp`: Connect directly to an SMTP server.
- `test`: Internal mailer for testing. Does not send mail.
-You can also write your own mailer by extending `PhabricatorMailAdapter`.
+You can also write your own mailer by extending `PhorgeMailAdapter`.
The `media` field supports these values:
- `email`: Configure this mailer for email.
- `sms`: Configure this mailer for SMS.
Once you've selected a mailer, find the corresponding section below for
instructions on configuring it.
Setting Complex Configuration
=============================
Mailers can not be edited from the web UI. If mailers could be edited from
the web UI, it would give an attacker who compromised an administrator account
a lot of power: they could redirect mail to a server they control and then
intercept mail for any other account, including password reset mail.
For more information about locked configuration options, see
@{article:Configuration Guide: Locked and Hidden Configuration}.
Setting `cluster.mailers` from the command line using `bin/config set` can be
tricky because of shell escaping. The easiest way to do it is to use the
`--stdin` flag. First, put your desired configuration in a file like this:
```lang=json, name=mailers.json
[
{
"key": "test-mailer",
"type": "test"
}
]
```
Then set the value like this:
```
-phabricator/ $ ./bin/config set --stdin cluster.mailers < mailers.json
+phorge/ $ ./bin/config set --stdin cluster.mailers < mailers.json
```
For alternatives and more information on configuration, see
@{article:Configuration User Guide: Advanced Configuration}
Mailer: Postmark
================
| Media | Email
|---------|
| Inbound | Yes
|---------|
Postmark is a third-party email delivery service. You can learn more at
<https://www.postmarkapp.com/>.
To use this mailer, set `type` to `postmark`, then configure these `options`:
- `access-token`: Required string. Your Postmark access token.
- `inbound-addresses`: Optional list<string>. Address ranges which you
will accept inbound Postmark HTTP webhook requests from.
The default address list is preconfigured with Postmark's address range, so
you generally will not need to set or adjust it.
The option accepts a list of CIDR ranges, like `1.2.3.4/16` (IPv4) or
`::ffff:0:0/96` (IPv6). The default ranges are:
```lang=json
[
"50.31.156.6/32",
"50.31.156.77/32",
"18.217.206.57/32"
]
```
The default address ranges were last updated in January 2019, and were
documented at: <https://postmarkapp.com/support/article/800-ips-for-firewalls>
Mailer: Mailgun
===============
| Media | Email
|---------|
| Inbound | Yes
|---------|
Mailgun is a third-party email delivery service. You can learn more at
<https://www.mailgun.com>. Mailgun is easy to configure and works well.
To use this mailer, set `type` to `mailgun`, then configure these `options`:
- `api-key`: Required string. Your Mailgun API key.
- `domain`: Required string. Your Mailgun domain.
- `api-hostname`: Optional string. Defaults to "api.mailgun.net". If your
account is in another region (like the EU), you may need to specify a
different hostname. Consult the Mailgun documentation.
Mailer: Amazon SES
==================
| Media | Email
|---------|
| Inbound | No
|---------|
Amazon SES is Amazon's cloud email service. You can learn more at
<https://aws.amazon.com/ses/>.
To use this mailer, set `type` to `ses`, then configure these `options`:
- `access-key`: Required string. Your Amazon SES access key.
- `secret-key`: Required string. Your Amazon SES secret key.
- `region`: Required string. Your Amazon SES region, like `us-west-2`.
- `endpoint`: Required string. Your Amazon SES endpoint, like
`email.us-west-2.amazonaws.com`.
NOTE: Amazon SES **requires you to verify your "From" address**. Configure
which "From" address to use by setting `metamta.default-address` in your
config, then follow the Amazon SES verification process to verify it. You
won't be able to send email until you do this!
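A sketch of a `cluster.mailers` entry for SES might look like this (the key
name is a placeholder, and the region and endpoint shown are just the examples
from the option list above):

```lang=json
[
  {
    "key": "my-ses-mailer",
    "type": "ses",
    "options": {
      "access-key": "...",
      "secret-key": "...",
      "region": "us-west-2",
      "endpoint": "email.us-west-2.amazonaws.com"
    }
  }
]
```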
Mailer: Twilio
==================
| Media | SMS
|---------|
| Inbound | No
|---------|
Twilio is a third-party notification service. You can learn more at
<https://www.twilio.com/>.
To use this mailer, set `type` to `twilio`, then configure these options:
- `account-sid`: Your Twilio Account SID.
- `auth-token`: Your Twilio Auth Token.
- `from-number`: Number to send text messages from, in E.164 format
(like `+15551237890`).
Mailer: Amazon SNS
==================
| Media | SMS
|---------|
| Inbound | No
|---------|
Amazon SNS is Amazon's cloud notification service. You can learn more at
<https://aws.amazon.com/sns/>. Note that this mailer is only able to send
SMS messages, not emails.
To use this mailer, set `type` to `sns`, then configure these options:
- `access-key`: Required string. Your Amazon SNS access key.
- `secret-key`: Required string. Your Amazon SNS secret key.
- `endpoint`: Required string. Your Amazon SNS endpoint.
- `region`: Required string. Your Amazon SNS region.
You can find the correct `region` value for your endpoint in the SNS
documentation.
Mailer: SendGrid
================
| Media | Email
|---------|
| Inbound | Yes
|---------|
SendGrid is a third-party email delivery service. You can learn more at
<https://sendgrid.com/>.
You can configure SendGrid in two ways: you can send via SMTP or via the REST
-API. To use SMTP, configure Phabricator to use an `smtp` mailer.
+API. To use SMTP, configure Phorge to use an `smtp` mailer.
To use the REST API mailer, set `type` to `sendgrid`, then configure
these `options`:
- `api-key`: Required string. Your SendGrid API key.
Older versions of the SendGrid API used different sets of credentials,
including an "API User". Make sure you're configuring your "API Key".
Mailer: Sendmail
================
| Media | Email
|---------|
| Inbound | Requires Configuration
|---------|
This requires a `sendmail` binary to be installed on the system. Most MTAs
(e.g., sendmail, qmail, postfix) should install one for you, but your machine
may not have one installed by default. For install instructions, consult the
documentation for your favorite MTA.
Since you'll be sending the mail yourself, you are subject to things like SPF
rules, blackholes, and MTA configuration which are beyond the scope of this
document. If you can already send outbound email from the command line or know
how to configure it, this option is straightforward. If you have no idea how to
do any of this, strongly consider using Postmark or Mailgun instead.
To use this mailer, set `type` to `sendmail`, then configure these `options`:
- - `message-id`: Optional bool. Set to `false` if Phabricator will not be
+ - `message-id`: Optional bool. Set to `false` if Phorge will not be
able to select a custom "Message-ID" header when sending mail via this
mailer. See "Message-ID Headers" below.
Mailer: SMTP
============
| Media | Email
|---------|
| Inbound | Requires Configuration
|---------|
You can use this adapter to send mail via an external SMTP server, like Gmail.
To use this mailer, set `type` to `smtp`, then configure these `options`:
- `host`: Required string. The hostname of your SMTP server.
- `port`: Optional int. The port to connect to on your SMTP server.
- `user`: Optional string. Username used for authentication.
- `password`: Optional string. Password for authentication.
- `protocol`: Optional string. Set to `tls` or `ssl` if necessary. Use
`ssl` for Gmail.
- - `message-id`: Optional bool. Set to `false` if Phabricator will not be
+ - `message-id`: Optional bool. Set to `false` if Phorge will not be
able to select a custom "Message-ID" header when sending mail via this
mailer. See "Message-ID Headers" below.
Disable Mail
============
| Media | All
|---------|
| Inbound | No
|---------|
To disable mail, just don't configure any mailers. (You can safely ignore the
setup warning reminding you to set up mailers if you don't plan to configure
any.)
Testing and Debugging Outbound Email
====================================
You can use the `bin/mail` utility to test, debug, and examine outbound mail. In
particular:
- phabricator/ $ ./bin/mail list-outbound # List outbound mail.
- phabricator/ $ ./bin/mail show-outbound # Show details about messages.
- phabricator/ $ ./bin/mail send-test # Send test messages.
+ phorge/ $ ./bin/mail list-outbound # List outbound mail.
+ phorge/ $ ./bin/mail show-outbound # Show details about messages.
+ phorge/ $ ./bin/mail send-test # Send test messages.
Run `bin/mail help <command>` for more help on using these commands.
By default, `bin/mail send-test` sends email messages, but you can use
the `--type` flag to send different types of messages.
You can monitor daemons using the Daemon Console (`/daemon/`, or click
**Daemon Console** from the homepage).
Priorities
==========
-By default, Phabricator will try each mailer in order: it will try the first
+By default, Phorge will try each mailer in order: it will try the first
mailer first. If that fails (for example, because the service is not available
at the moment) it will try the second mailer, and so on.
If you want to load balance between multiple mailers instead of using one as
-a primary, you can set `priority`. Phabricator will start with mailers in the
+a primary, you can set `priority`. Phorge will start with mailers in the
highest priority group and go through them randomly, then fall back to the
next group.
For example, if you have two SMTP servers and you want to balance requests
between them and then fall back to Mailgun if both fail, configure priorities
like this:
```lang=json
[
  {
    "key": "smtp-uswest",
    "type": "smtp",
    "priority": 300,
    "options": "..."
  },
  {
    "key": "smtp-useast",
    "type": "smtp",
    "priority": 300,
    "options": "..."
  },
  {
    "key": "mailgun-fallback",
    "type": "mailgun",
    "options": "..."
  }
]
```
-Phabricator will start with servers in the highest priority group (the group
+Phorge will start with servers in the highest priority group (the group
with the **largest** `priority` number). In this example, the highest group is
`300`, which has the two SMTP servers. They'll be tried in random order first.
-If both fail, Phabricator will move on to the next priority group. In this
+If both fail, Phorge will move on to the next priority group. In this
example, there are no other priority groups.
-If it still hasn't sent the mail, Phabricator will try servers which are not
+If it still hasn't sent the mail, Phorge will try servers which are not
in any priority group, in the configured order. In this example there is
only one such server, so it will try to send via Mailgun.
Message-ID Headers
==================
Email has a "Message-ID" header which is important for threading messages
-correctly in mail clients. Normally, Phabricator is free to select its own
+correctly in mail clients. Normally, Phorge is free to select its own
"Message-ID" header values for mail it sends.
However, some mailers (including Amazon SES) do not allow selection of custom
"Message-ID" values and will ignore or replace the "Message-ID" in mail that
is submitted through them.
-When Phabricator adds other mail headers which affect threading, like
+When Phorge adds other mail headers which affect threading, like
"In-Reply-To", it needs to know if its "Message-ID" headers will be respected
or not to select header values which will produce good threading behavior. If
we guess wrong and think we can set a "Message-ID" header when we can't, you
may get poor threading behavior in mail clients.
For most mailers (like Postmark, Mailgun, and Amazon SES), the correct setting
will be selected for you automatically, because the behavior of the mailer
is knowable ahead of time. For example, we know Amazon SES will never respect
our "Message-ID" headers.
However, if you're sending mail indirectly through a mailer like SMTP or
Sendmail, the mail might or might not be routing through some mail service
which will ignore or replace the "Message-ID" header.
For example, your local mailer might submit mail to Mailgun (so "Message-ID"
will work), or to Amazon SES (so "Message-ID" will not work), or to some other
mail service (which we may not know anything about). We can't make a reliable
guess about whether "Message-ID" will be respected or not based only on
the local mailer configuration.
By default, we check if the mailer has a hostname we recognize as belonging
to a service which does not allow us to set a "Message-ID" header. If we don't
recognize the hostname (which is very common, since these services are most
often configured against the localhost or some other local machine), we assume
we can set a "Message-ID" header.
If the outbound pathway does not actually allow selection of a "Message-ID"
header, you can set the `message-id` option on the mailer to `false` to tell
-Phabricator that it should not assume it can select a value for this header.
+Phorge that it should not assume it can select a value for this header.
For example, if you are sending mail via a local Postfix server which then
forwards the mail to Amazon SES (a service which does not allow selection of
-a "Message-ID" header), your `smtp` configuration in Phabricator should
+a "Message-ID" header), your `smtp` configuration in Phorge should
specify `"message-id": false`.
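As a sketch, such an `smtp` mailer entry might look something like this (the `key`, host, and port are illustrative; the important part is the `message-id` option):
```lang=json
{
  "key": "postfix-to-ses",
  "type": "smtp",
  "options": {
    "host": "127.0.0.1",
    "port": 25,
    "message-id": false
  }
}
```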
Next Steps
==========
Continue by:
- @{article:Configuring Inbound Email} so users can reply to email they
receive about revisions and tasks to interact with them; or
- learning about daemons with @{article:Managing Daemons with phd}; or
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/configuring_preamble.diviner b/src/docs/user/configuration/configuring_preamble.diviner
index 6b6b9da149..15ae20e75b 100644
--- a/src/docs/user/configuration/configuring_preamble.diviner
+++ b/src/docs/user/configuration/configuring_preamble.diviner
@@ -1,115 +1,115 @@
@title Configuring a Preamble Script
@group config
Adjust environmental settings (SSL, remote IPs) using a preamble script.
Overview
========
-If Phabricator is deployed in an environment where HTTP headers behave oddly
+If Phorge is deployed in an environment where HTTP headers behave oddly
(usually, because it is behind a load balancer), it may not be able to detect
some environmental features (like the client's IP, or the presence of SSL)
correctly.
You can use a special preamble script to make arbitrary adjustments to the
-environment and some parts of Phabricator's configuration in order to fix these
-problems and set up the environment which Phabricator expects.
+environment and some parts of Phorge's configuration in order to fix these
+problems and set up the environment which Phorge expects.
Creating a Preamble Script
==========================
To create a preamble script, write a file to:
- phabricator/support/preamble.php
+ phorge/support/preamble.php
-(This file is in Phabricator's `.gitignore`, so you do not need to worry about
+(This file is in Phorge's `.gitignore`, so you do not need to worry about
colliding with `git` or interacting with updates.)
This file should be a valid PHP script. If you aren't very familiar with PHP,
you can check for syntax errors with `php -l`:
- phabricator/ $ php -l support/preamble.php
+ phorge/ $ php -l support/preamble.php
No syntax errors detected in support/preamble.php
If present, this script will be executed at the very beginning of each web
request, allowing you to adjust the environment. For common adjustments and
examples, see the next sections.
Adjusting Client IPs
====================
-If your install is behind a load balancer, Phabricator may incorrectly detect
+If your install is behind a load balancer, Phorge may incorrectly detect
all requests as originating from the load balancer, rather than from the
correct client IPs.
In common cases where networks are configured like this, the `X-Forwarded-For`
header will have trustworthy information about the real client IP. You
can use the function `preamble_trust_x_forwarded_for_header()` in your
-preamble to tell Phabricator that you expect to receive requests from a
+preamble to tell Phorge that you expect to receive requests from a
load balancer or proxy which modifies this header:
```name="Trust X-Forwarded-For Header", lang=php
preamble_trust_x_forwarded_for_header();
```
You should do this //only// if the `X-Forwarded-For` header is known to be
trustworthy. In particular, if users can make requests to the web server
directly, they can provide an arbitrary `X-Forwarded-For` header, and thereby
spoof an arbitrary client IP.
The `X-Forwarded-For` header may also contain a list of addresses if a request
has been forwarded through multiple load balancers. If you know that requests
on your network are routed through `N` trustworthy devices, you can specify
`N` to tell the function how many layers of `X-Forwarded-For` to discard:
```name="Trust X-Forwarded-For Header, Multiple Layers", lang=php
preamble_trust_x_forwarded_for_header(3);
```
If you have an unusual network configuration (for example, the number of
trustworthy devices depends on the network path) you can also implement your
own logic.
Note that this is very odd, advanced, and easy to get wrong. If you get it
wrong, users will most likely be able to spoof any client address.
```name="Custom X-Forwarded-For Handling", lang=php
if (isset($_SERVER['HTTP_X_FORWARDED_FOR'])) {
  $raw_header = $_SERVER['HTTP_X_FORWARDED_FOR'];
  $real_address = your_custom_parsing_function($raw_header);
  $_SERVER['REMOTE_ADDR'] = $real_address;
}
```
Adjusting SSL
=============
-If your install is behind an SSL terminating load balancer, Phabricator may
+If your install is behind an SSL terminating load balancer, Phorge may
detect requests as HTTP when the client sees them as HTTPS. This can cause
-Phabricator to generate links with the wrong protocol, issue cookies without
+Phorge to generate links with the wrong protocol, issue cookies without
the SSL-only flag, or reject requests outright.
To fix this, you can set `$_SERVER['HTTPS']` explicitly:
```name="Explicitly Configure SSL Availability", lang=php
<?php
$_SERVER['HTTPS'] = true;
```
-You can also set this value to `false` to explicitly tell Phabricator that a
+You can also set this value to `false` to explicitly tell Phorge that a
request is not an SSL request.
Next Steps
==========
Continue by:
- returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/custom_fields.diviner b/src/docs/user/configuration/custom_fields.diviner
index 75d83fc8ba..5879931317 100644
--- a/src/docs/user/configuration/custom_fields.diviner
+++ b/src/docs/user/configuration/custom_fields.diviner
@@ -1,216 +1,216 @@
@title Configuring Custom Fields
@group config
How to add custom fields to applications which support them.
= Overview =
-Several Phabricator applications allow the configuration of custom fields. These
+Several Phorge applications allow the configuration of custom fields. These
fields allow you to add more information to objects, and in some cases reorder
or remove builtin fields.
For example, you could use custom fields to add an "Estimated Hours" field to
tasks, a "Lead" field to projects, or a "T-Shirt Size" field to users.
These applications currently support custom fields:
| Application | Support |
|-------------|---------|
| Differential | Partial Support |
| Diffusion | Limited Support |
| Maniphest | Full Support |
| Owners | Full Support |
| People | Full Support |
| Projects | Full Support |
Custom fields can appear in many interfaces and support search, editing, and
other features.
= Basic Custom Fields =
To get started with custom fields, you can use configuration to select and
reorder fields and to add new simple fields.
If you don't need complicated display controls or sophisticated validation,
these simple fields should cover most use cases. They allow you to attach
things like strings, numbers, and dropdown menus to objects.
The relevant configuration settings are:
| Application | Add Fields | Select Fields |
|-------------|------------|---------------|
| Differential | Planned | `differential.fields` |
| Diffusion | Planned | Planned |
| Maniphest | `maniphest.custom-field-definitions` | `maniphest.fields` |
| Owners | `owners.custom-field-definitions` | `owners.fields` |
| People | `user.custom-field-definitions` | `user.fields` |
| Projects | `projects.custom-field-definitions` | `projects.fields` |
When adding fields, you'll specify a JSON blob like this (for example, as the
value of `maniphest.custom-field-definitions`):
```lang=json
{
  "mycompany:estimated-hours": {
    "name": "Estimated Hours",
    "type": "int",
    "caption": "Estimated number of hours this will take.",
    "required": true
  },
  "mycompany:actual-hours": {
    "name": "Actual Hours",
    "type": "int",
    "caption": "Actual number of hours this took."
  },
  "mycompany:company-jobs": {
    "name": "Job Role",
    "type": "select",
    "options": {
      "mycompany:engineer": "Engineer",
      "mycompany:nonengineer": "Other"
    }
  },
  "mycompany:favorite-dinosaur": {
    "name": "Favorite Dinosaur",
    "type": "text"
  }
}
```
The fields will then appear in the other config option for the application
(for example, in `maniphest.fields`) and you can enable, disable, or reorder
them.
For details on how to define a field, see the next section.
= Custom Field Configuration =
When defining custom fields using a configuration option like
`maniphest.custom-field-definitions`, these options are available:
- **name**: Display label for the field on the edit and detail interfaces.
- **description**: Optional text shown when managing the field.
- **type**: Field type. The supported field types are:
- **int**: An integer, rendered as a text field.
- **text**: A string, rendered as a text field.
- **bool**: A boolean value, rendered as a checkbox.
- **select**: Allows the user to select from several options as defined
by **options**, rendered as a dropdown.
- **remarkup**: A text area which allows the user to enter markup.
- **users**: A typeahead which allows multiple users to be input.
- **date**: A date/time picker.
- **header**: Renders a visual divider which you can use to group fields.
- **link**: A text field which allows the user to enter a link.
- **edit**: Show this field on the application's edit interface (this
defaults to `true`).
- **view**: Show this field on the application's view interface (this
defaults to `true`). (Note: Empty fields are not shown.)
- **search**: Show this field on the application's search interface, allowing
users to filter objects by the field value.
- **fulltext**: Index the text in this field as part of the object's global
full-text index. This allows users to find the object by searching for
the field's contents using global search.
- **caption**: A caption to display underneath the field (optional).
- **required**: True if the user should be required to provide a value.
- **options**: If type is set to **select**, provide options for the dropdown
as a dictionary.
- **default**: Default field value.
- **strings**: Allows you to override specific strings based on the field
type. See below.
- **instructions**: Optional block of remarkup text which will appear
above the control when rendered on the edit view.
- **placeholder**: Placeholder text that appears in empty text boxes. Only
supported by text, int, and remarkup fields (optional).
- **copy**: If true, this field's value will be copied when an object is
created using another object as a template.
- **limit**: For control types which use a tokenizer control to let the user
select a list of values, this limits how many values can be selected. For
example, a "users" field with a limit of "1" will behave like the "Owner"
field in Maniphest and only allow selection of a single user.
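For example, a hypothetical field which allows selection of a single approving user and is available in search might be defined like this (the field key and names are examples only):
```lang=json
{
  "mycompany:approver": {
    "name": "Approver",
    "type": "users",
    "limit": 1,
    "search": true,
    "caption": "User who must sign off on this work."
  }
}
```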
The `strings` value supports different strings per control type. They are:
- **bool**
- **edit.checkbox** Text for the edit interface, no default.
- **view.yes** Text for the view interface, defaults to "Yes".
- **search.default** Text for the search interface, defaults to "(Any)".
- **search.require** Text for the search interface, defaults to "Require".
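For example, a hypothetical boolean field which overrides some of these strings might be defined like this (the field key and strings are examples only):
```lang=json
{
  "mycompany:signed-off": {
    "name": "Signed Off",
    "type": "bool",
    "strings": {
      "edit.checkbox": "Signed off by a manager.",
      "view.yes": "Signed Off",
      "search.default": "(Any)",
      "search.require": "Require sign-off"
    }
  }
}
```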
-Internally, Phabricator implements some additional custom field types and
+Internally, Phorge implements some additional custom field types and
options. These are not intended for general use and are subject to abrupt
change, but are documented here for completeness:
- **Credentials**: Controls with type `credential` allow selection of a
Passphrase credential which provides `credential.provides`, and creation
of credentials of `credential.type`.
- **Datasource**: Controls with type `datasource` allow selection of tokens
from an arbitrary datasource, controlled with `datasource.class` and
`datasource.parameters`.
= Advanced Custom Fields =
If you want custom fields to have advanced behaviors (sophisticated rendering,
advanced validation, complicated controls, interaction with other systems, etc),
-you can write a custom field as an extension and add it to Phabricator.
+you can write a custom field as an extension and add it to Phorge.
NOTE: This API is somewhat new and fairly large. You should expect that there
will be occasional changes to the API requiring minor updates in your code.
To do this, extend the appropriate `CustomField` class for the application you
want to add a field to:
| Application | Extend |
|-------------|---------|
| Differential | @{class:DifferentialCustomField} |
| Diffusion | @{class:PhabricatorCommitCustomField} |
| Maniphest | @{class:ManiphestCustomField} |
| Owners | @{class:PhabricatorOwnersCustomField} |
| People | @{class:PhabricatorUserCustomField} |
| Projects | @{class:PhabricatorProjectCustomField} |
The easiest way to get started is to drop your subclass into
-`phabricator/src/extensions/`. If Phabricator is configured in development
+`phorge/src/extensions/`. If Phorge is configured in development
mode, the class should immediately be available in the UI. If not, you can
-restart Phabricator (for help, see @{article:Restarting Phabricator}).
+restart Phorge (for help, see @{article:Restarting Phorge}).
For example, this is a simple template which adds a custom field to Maniphest:
```name=ExampleManiphestCustomField.php, lang=php
<?php

final class ExampleCustomField extends ManiphestCustomField {

  public function getFieldKey() {
    return 'example:test';
  }

  public function shouldAppearInPropertyView() {
    return true;
  }

  public function renderPropertyViewLabel() {
    return pht('Example Custom Field');
  }

  public function renderPropertyViewValue(array $handles) {
    return phutil_tag(
      'h1',
      array(
        'style' => 'color: #ff00ff',
      ),
      pht('It worked!'));
  }

}
```
Broadly, you can then add features by overriding more methods and implementing
them. Many of the native fields are implemented on the custom field
architecture, and it may be useful to look at them. For details on available
integrations, see the base class for your application and
@{class:PhabricatorCustomField}.
= Next Steps =
Continue by:
- - learning more about extending Phabricator with custom code in
- @{article@phabcontrib:Adding New Classes};
+ - learning more about extending Phorge with custom code in
+ @{article@contrib:Adding New Classes};
- or returning to the @{article:Configuration Guide}.
diff --git a/src/docs/user/configuration/managing_caches.diviner b/src/docs/user/configuration/managing_caches.diviner
index e873b99d8c..c4e209fc25 100644
--- a/src/docs/user/configuration/managing_caches.diviner
+++ b/src/docs/user/configuration/managing_caches.diviner
@@ -1,47 +1,47 @@
@title Managing Caches
@group config
-Discusses Phabricator caches and cache management.
+Discusses Phorge caches and cache management.
Overview
========
-Phabricator uses various caches to improve performance, similar to the caches
+Phorge uses various caches to improve performance, similar to the caches
a web browser uses to improve web performance.
In particular, blocks of text which are expensive to render (like formatted
text and syntax highlighted code) are often cached after they're rendered for
-the first time. When they're rendered again, Phabricator can read the cache
+the first time. When they're rendered again, Phorge can read the cache
instead of recomputing the result.
Because text is cached, you may continue to see the old result even after you
make certain configuration changes which should affect it. The most common
example of this is that if you enable syntax highlighting with Pygments, old
diffs and pastes may not appear highlighted.
You may also run into this issue if you modify existing Remarkup rules or
develop new ones, or modify other parts of the code that run before the results
are cached.
Caches will naturally expire over time, so if they aren't causing a problem
you can just ignore the out of date caches and they'll fix themselves
eventually (usually within 30 days).
If you don't want to wait, you can purge the caches. This will remove any
-cached data and force Phabricator to recompute the results.
+cached data and force Phorge to recompute the results.
Purging Caches
==============
-If you need to purge Phabricator's caches, you can use the CLI tool. Run it
+If you need to purge Phorge's caches, you can use the CLI tool. Run it
with the `--help` flag to see options:
- phabricator/ $ ./bin/cache purge --help
+ phorge/ $ ./bin/cache purge --help
This tool can purge caches in a granular way, but it's normally easiest to
just purge all of the caches:
- phabricator/ $ ./bin/cache purge --all
+ phorge/ $ ./bin/cache purge --all
You can purge caches safely. The data they contain can always be rebuilt from
-other data if Phabricator needs it.
+other data if Phorge needs it.
diff --git a/src/docs/user/configuration/managing_daemons.diviner b/src/docs/user/configuration/managing_daemons.diviner
index cf2ba85ea2..0e56a95995 100644
--- a/src/docs/user/configuration/managing_daemons.diviner
+++ b/src/docs/user/configuration/managing_daemons.diviner
@@ -1,131 +1,131 @@
@title Managing Daemons with phd
@group config
-Explains Phabricator daemons and the daemon control program `phd`.
+Explains Phorge daemons and the daemon control program `phd`.
= Overview =
-Phabricator uses daemons (background processing scripts) to handle a number of
+Phorge uses daemons (background processing scripts) to handle a number of
tasks:
- tracking repositories, discovering new commits, and importing and parsing
commits;
- sending email; and
- collecting garbage, like old logs and caches.
Daemons are started and stopped with **phd** (the **Ph**abricator **D**aemon
launcher). Daemons can be monitored via a web console.
-You do not need to run daemons for most parts of Phabricator to work, but some
+You do not need to run daemons for most parts of Phorge to work, but some
features (principally, repository tracking with Diffusion) require them and
several features will benefit in performance or stability if you configure
daemons.
= phd =
-**phd** is a command-line script (located at `phabricator/bin/phd`). To get
+**phd** is a command-line script (located at `phorge/bin/phd`). To get
a list of commands, run `phd help`:
- phabricator/ $ ./bin/phd help
+ phorge/ $ ./bin/phd help
NAME
- phd - phabricator daemon launcher
+ phd - phorge daemon launcher
...
Generally, you will use:
- **phd start** to launch all daemons;
- **phd restart** to restart all daemons;
- **phd status** to get a list of running daemons; and
- **phd stop** to stop all daemons.
If you want finer-grained control, you can use:
- **phd launch** to launch individual daemons; and
- **phd debug** to debug problems with daemons.
-NOTE: When you upgrade Phabricator or change configuration, you should restart
+NOTE: When you upgrade Phorge or change configuration, you should restart
the daemons by running `phd restart`.
= Daemon Console =
You can view status and debugging information for daemons in the Daemon Console
via the web interface. Go to `/daemon/` in your install or click
**Daemon Console** from "More Stuff".
The Daemon Console shows a list of all the daemons that have ever launched, and
allows you to view log information for them. If you have issues with daemons,
you may be able to find error information that will help you resolve the problem
in the console.
NOTE: The easiest way to figure out what's wrong with a daemon is usually to use
**phd debug** to launch it instead of **phd start**. This will run it without
daemonizing it, so you can see output in your console.
= Available Daemons =
You can get a list of launchable daemons with **phd list**:
- **test daemons** are not generally useful unless you are
developing daemon infrastructure or debugging a daemon problem;
- **PhabricatorTaskmasterDaemon** performs work from a task queue;
- **PhabricatorRepositoryPullLocalDaemon** daemons track repositories, for
more information see @{article:Diffusion User Guide}; and
- **PhabricatorTriggerDaemon** schedules event triggers and cleans up old
logs and caches.
= Debugging and Tuning =
In most cases, **phd start** handles launching all the daemons you need.
However, you may want to use more granular daemon controls to debug daemons,
launch custom daemons, or launch special daemons like the IRC bot.
To debug a daemon, use `phd debug`:
- phabricator/bin/ $ ./phd debug <daemon>
+ phorge/bin/ $ ./phd debug <daemon>
You can pass arguments like this (normal arguments are passed to the daemon
control mechanism, not to the daemon itself):
- phabricator/bin/ $ ./phd debug <daemon> -- --flavor apple
+ phorge/bin/ $ ./phd debug <daemon> -- --flavor apple
In debug mode, daemons do not daemonize, and they print additional debugging
output to the console. This should make it easier to debug problems. You can
terminate the daemon with `^C`.
To launch a nonstandard daemon, use `phd launch`:
- phabricator/bin/ $ ./phd launch <daemon>
+ phorge/bin/ $ ./phd launch <daemon>
This daemon will daemonize and run normally.
== General Tips ==
- You can set the maximum number of taskmasters that will run at once
by adjusting `phd.taskmasters`. If you have a task backlog, try increasing
it.
- When you `phd launch` or `phd debug` a daemon, you can type any unique
substring of its name, so `phd launch pull` will work correctly.
- `phd stop` and `phd restart` stop **all** of the daemons on the machine, not
just those started with `phd start`. If you're writing a restart script,
have it launch any custom daemons explicitly after `phd restart`.
- You can write your own daemons and manage them with `phd` by extending
- @{class:PhabricatorDaemon}. See @{article@phabcontrib:Adding New Classes}.
+ @{class:PhabricatorDaemon}. See @{article@contrib:Adding New Classes}.
- See @{article:Diffusion User Guide} for details about tuning the repository
daemon.
Multiple Hosts
==============
For information about running daemons on multiple hosts, see
@{article:Cluster: Daemons}.
Next Steps
==========
Continue by:
- learning about the repository daemon with @{article:Diffusion User Guide};
or
- - writing your own daemons with @{article@phabcontrib:Adding New Classes}.
+ - writing your own daemons with @{article@contrib:Adding New Classes}.
diff --git a/src/docs/user/configuration/managing_garbage.diviner b/src/docs/user/configuration/managing_garbage.diviner
index 0b18bd2a0a..5c5ef87762 100644
--- a/src/docs/user/configuration/managing_garbage.diviner
+++ b/src/docs/user/configuration/managing_garbage.diviner
@@ -1,68 +1,68 @@
@title Managing Garbage Collection
@group config
Understanding and configuring garbage collection.
Overview
========
-Phabricator generates various logs and caches during normal operation. Some of
+Phorge generates various logs and caches during normal operation. Some of
these logs and caches are usually of very little use after some time has
passed, so they are deleted automatically (often after a month or two) in a
process called "garbage collection".
Garbage collection is performed automatically by the daemons. You can review
all of the installed garbage collectors by browsing to {nav Config > Garbage
Collectors}.
Configuring Retention Policies
==============================
You can reconfigure the data retention policies for most collectors.
The default retention policies should be suitable for most installs. However,
you might want to **decrease** retention to reduce the amount of disk space
used by some high-volume log that you don't find particularly interesting, or
to adhere to an organizational data retention policy.
Alternatively, you might want to **increase** retention if you want to retain
some logs for a longer period of time, perhaps for auditing or analytic
purposes.
You can review the current retention policies in
{nav Config > Garbage Collectors}. To change a policy, use
`bin/garbage set-policy` to select a new policy:
```
-phabricator/ $ ./bin/garbage set-policy --collector cache.markup --days 7
+phorge/ $ ./bin/garbage set-policy --collector cache.markup --days 7
```
You can use `--days` to select how long data is retained for. You can also use
`--indefinite` to set an indefinite retention policy. This will stop the
garbage collector from cleaning up any data. Finally, you can use `--default`
to restore the default policy.
Your changes should be reflected in the web UI immediately, and will take
effect in the actual collector **the next time the daemons are restarted**.
Troubleshooting
===============
You can manually run a collector with `bin/garbage collect`.
```
-phabricator/ $ ./bin/garbage collect --collector cache.general
+phorge/ $ ./bin/garbage collect --collector cache.general
```
By using the `--trace` flag, you can inspect the operation of the collector
in detail.
Next Steps
==========
Continue by:
- exploring other daemon topics with @{article:Managing Daemons with phd}.
diff --git a/src/docs/user/configuration/notifications.diviner b/src/docs/user/configuration/notifications.diviner
index 13d93317f9..bd1835c294 100644
--- a/src/docs/user/configuration/notifications.diviner
+++ b/src/docs/user/configuration/notifications.diviner
@@ -1,280 +1,280 @@
@title Notifications User Guide: Setup and Configuration
@group config
Guide to setting up notifications.
Overview
========
-By default, Phabricator delivers information about events (like users creating
+By default, Phorge delivers information about events (like users creating
tasks or commenting on code reviews) through email and in-application
notifications.
-Phabricator can also be configured to deliver notifications in real time, by
+Phorge can also be configured to deliver notifications in real time, by
popping up a message in any open browser windows if something has happened or
an object has been updated.
To enable real-time notifications:
- Configure and start the notification server, as described below.
- Adjust `notification.servers` to point at it.
This document describes the process in detail.
Supported Browsers
==================
Notifications are supported for browsers which support WebSockets. This covers
most modern browsers (like Chrome, Firefox, Safari, and recent versions of
Internet Explorer) and many mobile browsers.
IE8 and IE9 do not support WebSockets, so real-time notifications won't work in
those browsers.
Installing Node and Modules
===========================
The notification server uses Node.js, so you'll need to install it first.
To install Node.js, follow the instructions on
[[ http://nodejs.org | nodejs.org ]].
You will also need to install the `ws` module for Node. This needs to be
installed into the notification server directory:
- phabricator/ $ cd support/aphlict/server/
- phabricator/support/aphlict/server/ $ npm install ws
+ phorge/ $ cd support/aphlict/server/
+ phorge/support/aphlict/server/ $ npm install ws
Once Node.js and the `ws` module are installed, you're ready to start the
server.
Running the Aphlict Server
==========================
After installing Node.js, you can control the notification server with the
`bin/aphlict` command. To start the server:
- phabricator/ $ bin/aphlict start
+ phorge/ $ bin/aphlict start
By default, the server must be able to listen on port `22280`. If you're using
a host firewall (like a security group in EC2), make sure traffic can reach the
server.
The server configuration is controlled by a configuration file, which is
-separate from Phabricator's configuration settings. The default file can
-be found at `phabricator/conf/aphlict/aphlict.default.json`.
+separate from Phorge's configuration settings. The default file can
+be found at `phorge/conf/aphlict/aphlict.default.json`.
To make adjustments to the default configuration, either copy this file to
create `aphlict.custom.json` in the same directory (this file will be used if
it exists) or specify a configuration file explicitly with the `--config` flag:
- phabricator/ $ bin/aphlict start --config path/to/config.json
+ phorge/ $ bin/aphlict start --config path/to/config.json
The configuration file has these settings:
- `servers`: //Required list.// A list of servers to start.
- `logs`: //Optional list.// A list of logs to write to.
- `cluster`: //Optional list.// A list of cluster peers. This is an advanced
feature.
- `pidfile`: //Required string.// Path to a PID file.
- `memory.hint`: //Optional int.// Suggestion to `node` about how much
memory to use, via `--max-old-space-size`. In most cases, this can be
left unspecified.
Each server in the `servers` list should be an object with these keys:
- `type`: //Required string.// The type of server to start. Options are
`admin` or `client`. Normally, you should run one of each.
- `port`: //Required int.// The port this server should listen on.
- `listen`: //Optional string.// Which interface to bind to. By default,
the `admin` server is bound to `127.0.0.1` (so only other services on the
local machine can connect to it), while the `client` server is bound
to `0.0.0.0` (so any client can connect).
- `ssl.key`: //Optional string.// If you want to use SSL on this port,
the path to an SSL key.
- `ssl.cert`: //Optional string.// If you want to use SSL on this port,
the path to an SSL certificate.
- `ssl.chain`: //Optional string.// If you have configured SSL on this
port, an optional path to a certificate chain file.
Each log in the `logs` list should be an object with these keys:
- `path`: //Required string.// Path to the log file.
Each peer in the `cluster` list should be an object with these keys:
- `host`: //Required string.// The peer host address.
- `port`: //Required int.// The peer port.
- `protocol`: //Required string.// The protocol to connect with, one of
`"http"` or `"https"`.
Cluster configuration is an advanced topic and can be omitted for most
installs. For more information on how to configure a cluster, see
@{article:Clustering Introduction} and @{article:Cluster: Notifications}.
The defaults are appropriate for simple cases, but you may need to adjust them
if you are running a more complex configuration.
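Putting these settings together, a minimal `aphlict.custom.json` for a simple single-host install might look something like this (the file paths are illustrative, and this sketch omits the SSL and cluster options):
```lang=json
{
  "pidfile": "/var/tmp/aphlict/pid/aphlict.pid",
  "logs": [
    {
      "path": "/var/log/aphlict.log"
    }
  ],
  "servers": [
    {
      "type": "client",
      "port": 22280,
      "listen": "0.0.0.0"
    },
    {
      "type": "admin",
      "port": 22281,
      "listen": "127.0.0.1"
    }
  ]
}
```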
-Configuring Phabricator
+Configuring Phorge
=======================
-After starting the server, configure Phabricator to connect to it by adjusting
+After starting the server, configure Phorge to connect to it by adjusting
`notification.servers`. This configuration option should have a list of servers
-that Phabricator should interact with.
+that Phorge should interact with.
Normally, you'll list one client server and one admin server, like this:
```lang=json
[
{
"type": "client",
- "host": "phabricator.mycompany.com",
+ "host": "phorge.mycompany.com",
"port": 22280,
"protocol": "https"
},
{
"type": "admin",
"host": "127.0.0.1",
"port": 22281,
"protocol": "http"
}
]
```
This configuration defines which services the user's browser will attempt to
connect to. Most of the time, it will be very similar to the services defined
in the Aphlict configuration. However, if you are sending traffic through a
load balancer or terminating SSL somewhere before traffic reaches Aphlict,
the services the browser connects to may need to have different hosts, ports
or protocols than the underlying server listens on.
Verifying Server Status
=======================
After configuring `notification.servers`, navigate to
{nav Config > Services > Notification Servers} to verify that things are
operational.
Troubleshooting
===============
You can run `aphlict` in the foreground to get output to your console:
- phabricator/ $ ./bin/aphlict debug
+ phorge/ $ ./bin/aphlict debug
Because the notification server uses WebSockets, your browser error console
may also have information that is useful in figuring out what's wrong.
The server also generates a log, by default in `/var/log/aphlict.log`. You can
change this location by adjusting configuration. The log may contain
information that is useful in resolving issues.
SSL and HTTPS
=============
-If you serve Phabricator over HTTPS, you must also serve websockets over HTTPS.
+If you serve Phorge over HTTPS, you must also serve websockets over HTTPS.
Browsers will refuse to connect to `ws://` websockets from HTTPS pages.
-If a client connects to Phabricator over HTTPS, Phabricator will automatically
+If a client connects to Phorge over HTTPS, Phorge will automatically
select an appropriate HTTPS service from `notification.servers` and instruct
the browser to open a websocket connection with `wss://`.
The simplest way to do this is configure Aphlict with an SSL key and
certificate and let it terminate SSL directly.
If you prefer not to do this, two other options are:
- run the websocket through a websocket-capable loadbalancer and terminate
SSL there; or
- run the websocket through `nginx` over the same socket as the rest of
your web traffic.
See the next sections for more detail.
Terminating SSL with a Load Balancer
====================================
If you want to terminate SSL in front of the notification server with a
traditional load balancer or a similar device, do this:
- Point `notification.servers` at your load balancer or reverse proxy,
specifying that the protocol is `https`.
- On the load balancer or proxy, terminate SSL and forward traffic to the
Aphlict server.
- In the Aphlict configuration, listen on the target port with `http`.
Terminating SSL with Nginx
==========================
If you use `nginx`, you can send websocket traffic to the same port as normal
HTTP traffic and have `nginx` proxy it selectively based on the request path.
This requires `nginx` 1.3 or greater. See the `nginx` documentation for
details:
> http://nginx.com/blog/websocket-nginx/
This is very complex, but allows you to support notifications without opening
additional ports.
An example `nginx` configuration might look something like this:
```lang=nginx, name=/etc/nginx/conf.d/connection_upgrade.conf
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
```
```lang=nginx, name=/etc/nginx/conf.d/websocket_pool.conf
upstream websocket_pool {
ip_hash;
server 127.0.0.1:22280;
}
```
-```lang=nginx, name=/etc/nginx/sites-enabled/phabricator.example.com.conf
+```lang=nginx, name=/etc/nginx/sites-enabled/phorge.example.com.conf
server {
- server_name phabricator.example.com;
- root /path/to/phabricator/webroot;
+ server_name phorge.example.com;
+ root /path/to/phorge/webroot;
# ...
location = /ws/ {
proxy_pass http://websocket_pool;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 999999999;
}
}
```
With this approach, you should make these additional adjustments:
-**Phabricator Configuration**: The entry in `notification.servers` with type
+**Phorge Configuration**: The entry in `notification.servers` with type
`"client"` should have these adjustments made:
- - Set `host` to the Phabricator host.
+ - Set `host` to the Phorge host.
- Set `port` to the standard HTTPS port (usually `443`).
- Set `protocol` to `"https"`.
- Set `path` to `/ws/`, so it matches the special `location` in your
`nginx` config.
You do not need to adjust the `"admin"` server.
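With these adjustments, the `notification.servers` configuration might look something like this (the hostname is illustrative):
```lang=json
[
  {
    "type": "client",
    "host": "phorge.example.com",
    "port": 443,
    "protocol": "https",
    "path": "/ws/"
  },
  {
    "type": "admin",
    "host": "127.0.0.1",
    "port": 22281,
    "protocol": "http"
  }
]
```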
**Aphlict**: Your Aphlict configuration should make these adjustments to
the `"client"` server:
- Do not specify any `ssl.*` options: `nginx` will send plain HTTP traffic
to Aphlict.
- Optionally, you can `listen` on `127.0.0.1` instead of `0.0.0.0`, because
the server will no longer receive external traffic.
diff --git a/src/docs/user/configuration/storage_adjust.diviner b/src/docs/user/configuration/storage_adjust.diviner
index 2403cd3fed..e460ddd041 100644
--- a/src/docs/user/configuration/storage_adjust.diviner
+++ b/src/docs/user/configuration/storage_adjust.diviner
@@ -1,191 +1,191 @@
@title Managing Storage Adjustments
@group config
Explains how to apply storage adjustments to the MySQL schemata.
Overview
========
-Phabricator uses a workflow called //storage adjustment// to make some minor
+Phorge uses a workflow called //storage adjustment// to make some minor
kinds of changes to the MySQL schema. This workflow complements the //storage
upgrade// workflow, which makes major changes.
You can perform storage adjustment by running:
- phabricator/ $ ./bin/storage adjust
+ phorge/ $ ./bin/storage adjust
This document describes what adjustments are, how they relate to storage
upgrades, how to perform them, and how to troubleshoot issues with storage
adjustment.
Understanding Adjustments
===================
-Storage adjustments make minor changes to the Phabricator MySQL schemata to
+Storage adjustments make minor changes to the Phorge MySQL schemata to
improve consistency, unicode handling, and performance. Changes covered by
adjustment include:
- Character set and collation settings for columns, tables, and databases.
- Setting and removing "Auto Increment" on columns.
- Adding, removing, renaming and adjusting keys.
Adjustment does not make major changes to the schemata, like creating or
removing columns or tables or migrating data. (Major changes are performed by
the upgrade workflow.)
Adjustments are separate from upgrades primarily because adjustments depend on
the MySQL version, while upgrades do not. If you update MySQL, better collations
may become available, and the adjustment workflow will convert your schemata to
use them.
All changes covered by adjustment are minor, and technically optional. However,
you are strongly encouraged to apply outstanding adjustments: if you do not,
you may encounter issues storing or sorting some unicode data, and may suffer
poor performance on some queries.
Reviewing Outstanding Adjustments
=================================
There are two ways to review outstanding adjustments: you can use the web UI,
or you can use the CLI.
To access the web UI, navigate to {nav Config > Database Status} or
{nav Config > Database Issues}. The //Database Status// panel provides a general
overview of all schemata. The //Database Issues// panel shows outstanding
issues.
These interfaces report //Errors//, which are serious issues that can not be
resolved through adjustment, and //Warnings//, which are minor issues that the
adjustment workflow can resolve.
You can also review adjustments from the CLI, by running:
- phabricator/ $ ./bin/storage adjust
+ phorge/ $ ./bin/storage adjust
Before you're prompted to actually apply adjustments, you'll be given a list of
available adjustments. You can then make a choice to apply them.
Performing Adjustments
======================
To perform adjustments, run the `adjust` workflow:
- phabricator/ $ ./bin/storage adjust
+ phorge/ $ ./bin/storage adjust
For details about flags, use:
- phabricator/ $ ./bin/storage help adjust
+ phorge/ $ ./bin/storage help adjust
You do not normally need to run this workflow manually: it will be run
automatically after you run the `upgrade` workflow.
History and Rationale
=====================
The primary motivation for the adjustment workflow is MySQL's handling of
unicode character sets. Before MySQL 5.5, MySQL supports a character set called
`utf8`. However, this character set can not store 4-byte unicode characters
(including emoji). Inserting 4-byte characters into a `utf8` column truncates
the data.
With MySQL 5.5, a new `utf8mb4` character set was introduced. This character
set can safely store 4-byte unicode characters.
The adjustment workflow allows us to alter the schema to primarily use
`binary` character sets on older MySQL, and primarily use `utf8mb4` character
-sets on newer MySQL. The net effect is that Phabricator works consistently and
+sets on newer MySQL. The net effect is that Phorge works consistently and
can store 4-byte unicode characters regardless of the MySQL version. Under
newer MySQL, we can also take advantage of the better collation rules the
`utf8mb4` character set offers.
The adjustment workflow was introduced in November 2014. If your install
predates its introduction, your first adjustment may take a long time (we must
convert all of the data out of `utf8` and into the appropriate character set).
If your install was set up after November 2014, adjustments should generally
be very minor and complete quickly, unless you perform a major MySQL update and
make new character sets available.
If you plan to update MySQL from an older version to 5.5 or newer, it is
advisable to update first, then run the adjustment workflow. If you adjust
first, you'll need to adjust again after updating, so you'll end up spending
twice as much time performing schemata adjustments.
Troubleshooting
===============
When you apply adjustments, some adjustments may fail. Some of the most common
errors you may encounter are:
- **#1406 Data Too Long**: Usually this is caused by a very long object name
(like a task title) which contains multibyte unicode characters. When the
column type is converted to `binary`, only the first part of the title still
fits in the column. Depending on what is failing, you may be able to find
the relevant object in the web UI and retitle it so the adjustment succeeds.
Alternatively, you can use `--unsafe` to force the adjustment to truncate
the title. This will destroy some data, but usually the data is not
important (just the end of very long titles).
- **#1366 Incorrect String Value**: This can occur when converting invalid
or truncated multibyte unicode characters to a unicode character set.
In both cases, the old value can not be represented under the new character
set. You may be able to identify the object and edit it to allow the
adjustment to proceed, or you can use the `--unsafe` flag to truncate the
data at the invalid character. Usually, the truncated data is not important.
As with most commands, you can add the `--trace` flag to get more details about
what `bin/storage adjust` is doing. This may help you diagnose or understand any
issues you encounter, and this data is useful if you file reports in the
upstream.
In general, adjustments are not critical. If you run into issues applying
adjustments, it is safe to file a task in the upstream describing the problem
-you've encountered and continue using Phabricator normally until the issue can
+you've encountered and continue using Phorge normally until the issue can
be resolved.
Surplus Schemata
================
After performing adjustment, you may receive an error that a table or column is
"Surplus". The error looks something like this:
| Target | Error |
| --- | --- |
-| phabricator_example.example_table | Surplus |
+| phorge_example.example_table | Surplus |
-Generally, "Surplus" means that Phabricator does not expect the table or column
+Generally, "Surplus" means that Phorge does not expect the table or column
to exist. These surpluses usually exist because you (or someone else
with database access) added the table or column manually. Rarely, they can
also exist for other reasons. They are usually safe to delete, but because
-deleting them destroys data and Phabricator can not be sure that the table or
+deleting them destroys data and Phorge can not be sure that the table or
column doesn't have anything important in it, it does not delete them
automatically.
If you recognize the schema causing the issue as something you added and you
don't need it anymore, you can safely delete it. If you aren't sure whether
you added it or not, you can move the data somewhere else and delete it later.
To move a table, first create a database for it like `my_backups`. Then, rename
the table to move it into that database (use the table name given in the error
message):
```lang=sql
CREATE DATABASE my_backups;
-RENAME TABLE phabricator_example.example_table
+RENAME TABLE phorge_example.example_table
TO my_backups.example_table;
```
-Phabricator will ignore tables that aren't in databases it owns, so you can
-safely move anything you aren't sure about outside of the Phabricator databases.
+Phorge will ignore tables that aren't in databases it owns, so you can
+safely move anything you aren't sure about outside of the Phorge databases.
If you're sure you don't need a table, use `DROP TABLE` to destroy it,
specifying the correct table name (the one given in the error message):
```lang=sql
-DROP TABLE phabricator_example.example_table;
+DROP TABLE phorge_example.example_table;
```
This will destroy the table permanently.
diff --git a/src/docs/user/configuration/troubleshooting_https.diviner b/src/docs/user/configuration/troubleshooting_https.diviner
index bdc3439d7d..b3aae81035 100644
--- a/src/docs/user/configuration/troubleshooting_https.diviner
+++ b/src/docs/user/configuration/troubleshooting_https.diviner
@@ -1,80 +1,80 @@
@title Troubleshooting HTTPS
@group config
Detailed instructions for troubleshooting HTTPS connection problems.
= Overview =
-If you're having trouble connecting to an HTTPS install of Phabricator, and
+If you're having trouble connecting to an HTTPS install of Phorge, and
particularly if you're receiving a "There was an error negotiating the SSL
connection." error, this document may be able to help you diagnose and resolve
the problem.
Connection negotiation can fail for several reasons. The major ones are:
- You have not added the Certificate Authority as a trusted authority
(this is the most common problem, and usually the issue for self-signed
certificates).
- The SSL certificate is signed for the wrong domain. For example, a
certificate signed for `www.example.com` will not work for
- `phabricator.example.com`.
+ `phorge.example.com`.
- The server rejects TLSv1 SNI connections for the domain (this is
complicated, see below).
= Certificate Authority Problems =
SSL certificates need to be signed by a trusted authority (called a Certificate
Authority or "CA") to be accepted. If the CA for a certificate is untrusted, the
connection will fail (this defends the connection from an eavesdropping attack
called "man in the middle"). Normally, you purchase a certificate from a known
authority and clients have a list of trusted authorities.
You can self-sign a certificate by creating your own CA, but clients will not
trust it by default. They need to add the CA as a trusted authority.
For instructions on adding CAs, see `arcanist/resources/ssl/README`.
If you'd prefer that `arc` not verify the identity of the server whatsoever, you
can use the `https.blindly-trust-domains` setting. This will make it
dramatically easier for adversaries to perform certain types of attacks, and is
**strongly discouraged**:
$ arc set-config https.blindly-trust-domains '["example.com"]'
= Domain Problems =
Verify the domain the certificate was issued for. You can generally do this
with:
$ openssl x509 -text -in <certificate>
If the certificate was accidentally generated for, e.g. `www.example.com` but
-you installed Phabricator on `phabricator.example.com`, you need to generate a
+you installed Phorge on `phorge.example.com`, you need to generate a
new certificate for the right domain.
= SNI Problems =
Server Name Identification ("SNI") is a feature of TLSv1 which works a bit like
Apache VirtualHosts, and allows a server to present different certificates to
clients who are connecting to it using different names.
Servers that are not configured properly may reject TLSv1 SNI requests because
they do not recognize the name the client is connecting with. This
topic is complicated, but you can test for it by running:
$ openssl s_client -connect example.com:443 -servername example.com
Replace **both** instances of "example.com" with your domain. If you receive
an error in `SSL23_GET_SERVER_HELLO` with `reason(1112)`, like this:
CONNECTED(00000003)
87871:error:14077458:SSL routines:SSL23_GET_SERVER_HELLO:reason(1112):
/SourceCache/OpenSSL098/OpenSSL098-44/src/ssl/s23_clnt.c:602:
...it indicates the server is misconfigured. The most common cause of this problem
-is an Apache server that does not explicitly name the Phabricator domain as a
+is an Apache server that does not explicitly name the Phorge domain as a
valid VirtualHost.
This error occurs only for some versions of the OpenSSL client library
(from v0.9.8r or earlier until 1.0.0), so only some users may experience it.
diff --git a/src/docs/user/feedback.diviner b/src/docs/user/feedback.diviner
deleted file mode 100644
index 0998002b10..0000000000
--- a/src/docs/user/feedback.diviner
+++ /dev/null
@@ -1,7 +0,0 @@
-@title Give Feedback! Get Support!
-@short Feedback/Support
-@group cellar
-
-Deprecated.
-
-This article has moved to @{article:Support Resources}.
diff --git a/src/docs/user/field/conduit_changes.diviner b/src/docs/user/field/conduit_changes.diviner
index 96a890600a..b2246310e3 100644
--- a/src/docs/user/field/conduit_changes.diviner
+++ b/src/docs/user/field/conduit_changes.diviner
@@ -1,59 +1,59 @@
@title Managing Conduit Changes
@group fieldmanual
Help with managing Conduit API changes.
Overview
========
Many parts of the Conduit API are stable, but some parts are subject to change.
For example, when we write a new application, it usually adds several new API
methods and may update older methods.
This document discusses API stability and how to minimize disruption when
transitioning between API versions.
Method Statuses
===============
Methods have one of three statuses:
- **Unstable**: This is a new or experimental method which is subject to
change. You may call these methods to get access to recently released
features, but should expect that you may need to adjust your usage of
them before they stabilize.
- **Stable**: This is an established method which generally will not change.
- **Deprecated**: This method will be removed in a future version of
- Phabricator and callers should cease using it.
+ Phorge and callers should cease using it.
Normally, a method is deprecated only when it is obsolete or a new, more
powerful method is available to replace it.
Finding Deprecated Calls
========================
You can identify calls to deprecated methods in {nav Conduit > Call Logs}.
Use {nav My Deprecated Calls} to find calls to deprecated methods you have
made, and {nav Deprecated Call Logs} to find deprecated calls by all users.
You can also search for calls by specific users. For example, it may be useful
to search for any bot accounts you run to make sure they aren't calling
outdated APIs.
The most common cause of calls to deprecated methods is users running very
old versions of Arcanist. They can normally upgrade by running `arc upgrade`.
When the changelogs mention a method deprecation, you can use the call logs
to identify callers and notify them to upgrade or switch away. When the
changelogs mention a method removal, you can use the call logs to verify that
you will not be impacted.
Next Steps
==========
Continue by:
- returning to @{article:Conduit API Overview}.
diff --git a/src/docs/user/field/darkconsole.diviner b/src/docs/user/field/darkconsole.diviner
index 065be2d8f1..494b175416 100644
--- a/src/docs/user/field/darkconsole.diviner
+++ b/src/docs/user/field/darkconsole.diviner
@@ -1,181 +1,181 @@
@title Using DarkConsole
@group fieldmanual
Enabling and using the built-in debugging and performance console.
Overview
========
-DarkConsole is a debugging console built into Phabricator which exposes
+DarkConsole is a debugging console built into Phorge which exposes
configuration, performance and error information. It can help you detect,
-understand and resolve bugs and performance problems in Phabricator
+understand and resolve bugs and performance problems in Phorge
applications.
Security Warning
================
WARNING: Because DarkConsole exposes some configuration and debugging
information, it is disabled by default and you should be cautious about
enabling it in production.
Particularly, DarkConsole may expose some information about your session
details or other private material. It has some crude safeguards against this,
but does not completely sanitize output.
This is mostly a risk if you take screenshots or copy/paste output and share
it with others.
Enabling DarkConsole
====================
You enable DarkConsole in your configuration, by setting `darkconsole.enabled`
to `true`, and then turning it on in {nav Settings > Developer Settings}.
Once DarkConsole is enabled, you can show or hide it by pressing ##`## on your
keyboard.
Since the setting is not available to logged-out users, you can also set
`darkconsole.always-on` if you need to access DarkConsole on logged-out pages.
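If you prefer to make the configuration changes from the CLI, `bin/config` can
usually set these keys (a sketch; run it from your install directory):
```
phorge/ $ ./bin/config set darkconsole.enabled true
phorge/ $ ./bin/config set darkconsole.always-on true
```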
DarkConsole has a number of tabs, each of which is powered by a "plugin". You
can use them to access different debugging and performance features.
Plugin: Error Log
=================
The "Error Log" plugin shows errors that occurred while generating the page,
similar to the httpd `error.log`. You can send information to the error log
explicitly with the @{function@arcanist:phlog} function.
If errors occurred, a red dot will appear on the plugin tab.
Plugin: Request
===============
The "Request" plugin shows information about the HTTP request the server
received, and the server itself.
Plugin: Services
================
The "Services" plugin lists calls a page made to external services, like
MySQL and subprocesses.
The Services tab can help you understand and debug issues related to page
behavior: for example, you can use it to see exactly what queries or commands a
page is running. In some cases, you can re-run those queries or commands
yourself to examine their output and look for problems.
This tab can also be particularly useful in understanding page performance,
because many performance problems are caused by inefficient queries (queries
with bad query plans or which take too long) or repeated queries (queries which
could be better structured or benefit from caching).
When analyzing performance problems, the major things to look for are:
**Summary**: In the summary table at the top of the tab, are any categories
of events dominating the performance cost? For normal pages, the costs should
be roughly along these lines:
| Event Type | Approximate Cost |
|---|---|
| Connect | 1%-10% |
| Query | 10%-40% |
| Cache | 1% |
| Event | 1% |
| Conduit | 0%-80% |
| Exec | 0%-80% |
| All Services | 10%-75% |
| Entire Page | 100ms - 1000ms |
These ranges are rough, but should usually be what you expect from a page
summary. If any of these numbers are way off (for example, "Event" is taking
50% of runtime), that points toward a possible problem in that section of the
code, and can guide you to examining the related service calls more carefully.
**Duration**: In the Duration column, look for service calls that take a long
time. Sometimes these calls are just what the page is doing, but sometimes they
may indicate a problem.
Some questions that may help in understanding this column: are there a small
number of calls which account for a majority of the total page generation time?
Do these calls seem fundamental to the behavior of the page, or is it not clear
why they need to be made? Do some of them seem like they could be cached?
If there are queries which look slow, using the "Analyze Query Plans" button
may help reveal poor query plans.
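You can also examine a plan by hand: copy a suspicious query from the Services
tab and prefix it with `EXPLAIN` in a MySQL shell. A sketch (the database,
table, and query here are placeholders, not real names):
```
mysql> USE example_database;
mysql> EXPLAIN SELECT * FROM example_table WHERE authorPHID = 'PHID-USER-abcd';
```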
Generally, this column can help pinpoint these kinds of problems:
- Queries or other service calls which are huge and inefficient.
- Work the page is doing which it could cache instead.
- Problems with network services.
- Missing keys or poor query plans.
**Repeated Calls**: In the "Details" column, look for service calls that are
being made over and over again. Sometimes this is normal, but usually it
indicates a call that can be batched or cached.
Some things to look for are: are similar calls being made over and over again?
Do calls mostly make sense given what the page is doing? Could any calls be
cached? Could multiple small calls be collected into one larger call? Are any
of the service calls clearly goofy nonsense that shouldn't be happening?
Generally, this column can help pinpoint these kinds of problems:
- Unbatched queries which should be batched (see
@{article:Performance: N+1 Query Problem}).
- Opportunities to improve performance with caching.
- General goofiness in how service calls are working.
If the services tab looks fine, and particularly if a page is slow but the
"All Services" cost is small, that may indicate a problem in PHP. The best
tool to understand problems in PHP is XHProf.
Plugin: Startup
===============
The "Startup" plugin shows information about startup phases. This information
can provide insight about performance problems which occur before the profiler
can start.
Normally, the profiler is the best tool for understanding runtime performance,
but some work is performed before the profiler starts (for example, loading
libraries and configuration). If there is a substantial difference between the
wall time reported by the profiler and the "Entire Page" cost reported by the
Services tab, the Startup tab can help account for that time.
It is normal for starting the profiler to increase the cost of the page
somewhat: the profiler itself adds overhead while it is running, and the page
must do some work after the profiler is stopped to save the profile and
complete other shutdown operations.
Plugin: XHProf
==============
The "XHProf" plugin gives you access to the XHProf profiler. To use it, you need
to install the corresponding PHP plugin.
Once it is installed, you can use XHProf to profile the runtime performance of
a page. This will show you a detailed breakdown of where PHP spent time. This
can help find slow or inefficient application code, and is the most powerful
general-purpose performance tool available.
For instructions on installing and using XHProf, see @{article:Using XHProf}.
Next Steps
==========
Continue by:
- installing XHProf with @{article:Using XHProf}; or
- understanding and reporting performance issues with
@{article:Troubleshooting Performance Problems}.
diff --git a/src/docs/user/field/exit_codes.diviner b/src/docs/user/field/exit_codes.diviner
index 7c69b3509b..40f1d281e3 100644
--- a/src/docs/user/field/exit_codes.diviner
+++ b/src/docs/user/field/exit_codes.diviner
@@ -1,243 +1,242 @@
@title Command Line Exit Codes
@group fieldmanual
-Explains the use of exit codes in Phabricator command line scripts.
+Explains the use of exit codes in Phorge command line scripts.
Overview
========
When you run a command from the command line, it exits with an //exit code//.
This code is normally not shown on the CLI, but you can examine the exit code
of the last command you ran by looking at `$?` in your shell:
$ ls
...
$ echo $?
0
Programs which run commands can operate on exit codes, and shell constructs
like `cmdx && cmdy` operate on exit codes.
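For example, `&&` runs the second command only if the first exits `0`, and
`||` runs it only on a nonzero exit:
```
$ true && echo "first command exited 0"
first command exited 0
$ false || echo "first command exited nonzero"
first command exited nonzero
```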
The code `0` means success. Other codes signal some sort of error or status
condition, depending on the system and command.
-With rare exception, Phabricator uses //all other codes// to signal
+With rare exception, Phorge uses //all other codes// to signal
**catastrophic failure**.
This is an explicit architectural decision and one we are unlikely to deviate
from: generally, we will not accept patches which give a command a nonzero exit
code to indicate an expected state, an application status, or a minor abnormal
condition.
Generally, this decision reflects a philosophical belief that attaching
application semantics to exit codes is a relic of a simpler time, and that
they are not appropriate for communicating application state in a modern
operational environment. This document explains the reasoning behind our use of
exit codes in more detail.
-In particular, this approach is informed by a focus on operating Phabricator
+In particular, this approach is informed by a focus on operating Phorge
clusters at scale. This is not a common deployment scenario, but we consider it
the most important one. Our use of exit codes makes it easier to deploy and
-operate a Phabricator cluster at larger scales. It makes it slightly harder to
+operate a Phorge cluster at larger scales. It makes it slightly harder to
deploy and operate a small cluster or single host by gluing together `bash`
scripts. We are willingly trading the small scale away for advantages at larger
scales.
Problems With Exit Codes
========================
We do not use exit codes to communicate application state because doing so
makes it harder to write correct scripts, and the primary benefit is that it
makes it easier to write incorrect ones.
This is somewhat at odds with the philosophy of "worse is better", but a modern
operations environment faces different forces than the interactive shell did
in the 1970s, particularly at scale.
We consider correctness to be very important to modern operations environments.
-In particular, we manage a Phabricator cluster (Phacility) and believe that
-having reliable, repeatable processes for provisioning, configuration and
-deployment is critical to maintaining and scaling our operations. Our use of
-exit codes makes it easier to implement processes that are correct and reliable
-on top of Phabricator management scripts.
+In particular, we believe that having reliable, repeatable processes for
+provisioning, configuration and deployment is critical to maintaining and
+scaling our operations. Our use of exit codes makes it easier to implement
+processes that are correct and reliable on top of Phorge management scripts.
Exit codes as signals for application state are problematic because they are
ambiguous: you can't use them to distinguish between dissimilar failure states
which should prompt very different operational responses.
Exit codes primarily make writing things like `bash` scripts easier, but we
think you shouldn't be writing `bash` scripts in a modern operational
environment if you care very much about your software working.
Software environments which are powerful enough to handle errors properly are
also powerful enough to parse command output to unambiguously read and react to
complex state. Communicating application state through exit codes almost
exclusively makes it easier to handle errors in a haphazard way which is often
incorrect.
Exit Codes are Ambiguous
========================
In many cases, exit codes carry very little information and many different
conditions can produce the same exit code, including conditions which should
prompt very different responses.
The command line tool `grep` searches for text. For example, you might run
a command like this:
$ grep zebra corpus.txt
This searches for the text `zebra` in the file `corpus.txt`. If the text is
not found, `grep` exits with a nonzero exit code (specifically, `1`).
Suppose you run `grep zebra corpus.txt` and observe a nonzero exit code. What
does that mean? These are //some// of the possible conditions which are
consistent with your observation:
- The text `zebra` was not found in `corpus.txt`.
- `corpus.txt` does not exist.
- You do not have permission to read `corpus.txt`.
- `grep` is not installed.
- You do not have permission to run `grep`.
- There is a bug in `grep`.
- Your `grep` binary is corrupt.
- `grep` was killed by a signal.
If you're running this command interactively on a single machine, it's probably
OK for all of these conditions to be conflated. You aren't going to examine the
exit code anyway (it isn't even visible to you by default), and `grep` likely
printed useful information to `stderr` if you hit one of the less common issues.
If you're running this command from operational software (like deployment,
configuration or monitoring scripts) and you care about the correctness and
repeatability of your process, we believe conflating these conditions is not
OK. The operational response to text not being present in a file should almost
always differ substantially from the response to the file not being present or
`grep` being broken.
In a particularly bad case, a broken `grep` might cause a careless deployment
script to continue down an inappropriate path and cascade into a more serious
failure.
Even in a less severe case, unexpected conditions should be detected and raised
to operations staff. `grep` being broken or a file that is expected to exist
not existing are both detectable, unexpected, and likely severe conditions, but
they can not be differentiated and handled by examining the exit code of
`grep`. It is much better to detect and raise these problems immediately than
discover them after a lengthy root cause analysis.
Some of these conditions can be differentiated by examining the specific exit
code of the command instead of acting on all nonzero exit codes. However, many
failure conditions produce the same exit codes (particularly code `1`) and
there is no way to guarantee that a particular code signals a particular
condition, especially across systems.
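For instance, a typical GNU `grep` distinguishes "no match" from "error", but
many unrelated failures still collapse onto those same codes (a sketch; exact
codes and messages vary across implementations):
```
$ grep zebra corpus.txt; echo $?
1
$ grep zebra no-such-file.txt; echo $?
grep: no-such-file.txt: No such file or directory
2
```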
Realistically, it is also relatively rare for scripts to even make an effort to
distinguish between exit codes, and all nonzero exit codes are often treated
the same way.
Bash Scripts are not Robust
============================
Exit codes that indicate application status make writing `bash` scripts (or
scripts in other tools which provide a thin layer on top of what is essentially
`bash`) a lot easier and more convenient.
For example, it is pretty tricky to parse JSON in `bash` or with standard
command-line tools, and much easier to react to exit codes. This is sometimes
used as an argument for communicating application status in exit codes.
We reject this because we don't think you should be writing `bash` scripts if
you're doing real operations. Fundamentally, `bash` shell scripts are not a
robust building block for creating correct, reliable operational processes.
Here is one problem with using `bash` scripts to perform operational tasks.
Consider this command:
$ mysqldump | gzip > backup.sql.gz
Now, consider this command:
$ mysqldermp | gzip > backup.sql.gz
These commands represent a fairly standard way to accomplish a task (dumping
a compressed database backup to disk) in a `bash` script.
Note that the second command contains a typo (`dermp` instead of `dump`) which
will cause the command to exit abruptly with a nonzero exit code.
However, both these statements run successfully and exit with exit code `0`
(indicating success). Both will create a `backup.sql.gz` file. One backs up
your data; the other never backs up your data. This second command will never
work and never do what the author intended, but will appear successful under
casual inspection.
These behaviors are the same under `set -e`.
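Catching this failure at the shell level requires opting into non-default
behavior, roughly like this (a sketch; exact output and codes vary by shell
and system):
```
$ set -o pipefail
$ mysqldermp | gzip > backup.sql.gz
bash: mysqldermp: command not found
$ echo $?
127
```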
This fragile attitude toward error handling is endemic to `bash` scripts. The
default behavior is to continue on errors, and it isn't easy to change this
default. Options like `set -e` are unreliable and it is difficult to detect and
react to errors in fundamental constructs like pipes. The tools that `bash`
scripts employ (like `grep`) emit ambiguous error codes. Scripts can not help
but propagate this ambiguity no matter how careful they are with error handling.
It is likely //possible// to implement these things safely and correctly in
`bash`, but it is not easy or straightforward. More importantly, it is not the
default: the default behavior of `bash` is to ignore errors and continue.
Gluing commands together in `bash` or something that sits on top of `bash`
makes it easy and convenient to get a process that works fairly well most of
the time at small scales, but we are not satisfied that it represents a robust
foundation for operations at larger scales.
Reacting to State
=================
Instead of communicating application state through exit codes, we generally
communicate application state through machine-parseable output with a success
(`0`) exit code. All nonzero exit codes indicate catastrophic failure which
requires operational intervention.
Callers are expected to request machine-parseable output if necessary (for
example, by passing a `--json` flag or other similar flags), verify the command
exits with a `0` exit code, parse the output, then react to the state it
communicates as appropriate.
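Even from a shell, that pattern looks roughly like this (a sketch;
`example-command` and its `--json` flag are hypothetical, and `jq` is assumed
to be available):
```
$ output="$(example-command --json)" || { echo 'catastrophic failure' >&2; exit 1; }
$ echo "$output" | jq -r '.status'
```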
In a sufficiently powerful scripting environment (e.g., one with data
structures and a JSON parser), this is straightforward and makes it easy to
react precisely and correctly. It also allows scripts to communicate
arbitrarily complex state. Provided your environment gives you an appropriate
toolset, it is much more powerful and not significantly more complex than using
error codes.
Most importantly, it allows the calling environment to treat nonzero exit
statuses as catastrophic failure by default.
Moving Forward
==============
Given these concerns, we are generally unwilling to bring changes which use
exit codes to communicate application state (other than catastrophic failure)
into the upstream. There are some exceptions, but these are rare. In
particular, ease of use in a `bash` environment is not a compelling motivation.
We are broadly willing to make output machine parseable or provide an explicit
machine output mode (often a `--json` flag) if there is a reasonable use case
-for it. However, we operate a large production cluster of Phabricator instances
+for it. However, we operate a large production cluster of Phorge instances
with the tools available in the upstream, so the lack of machine parseable
output is not sufficient to motivate adding such output on its own: we also
need to understand the problem you're facing, and why it isn't a problem we
face. A simpler or cleaner approach to the problem may already exist.
-If you just want to write `bash` scripts on top of Phabricator scripts and you
+If you just want to write `bash` scripts on top of Phorge scripts and you
are unswayed by these concerns, you can often just build a composite command to
get roughly the same effect that you'd get out of an exit code.
For example, you can pipe things to `grep` to convert output into exit codes.
This should generally have failure rates that are comparable to the background
failure level of relying on `bash` as a scripting environment.
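A sketch of that pattern (an assumption, not a documented contract: it presumes
the wrapped script prints one line per item of interest and nothing otherwise,
so verify the actual output on your install first):
```
phorge/ $ ./bin/repository importing rXYZ | grep -q . && echo "rXYZ still importing"
```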
diff --git a/src/docs/user/field/performance.diviner b/src/docs/user/field/performance.diviner
index c5980acd0e..09e7023fb9 100644
--- a/src/docs/user/field/performance.diviner
+++ b/src/docs/user/field/performance.diviner
@@ -1,179 +1,179 @@
@title Troubleshooting Performance Problems
@group fieldmanual
Guide to troubleshooting slow pages and hangs.
Overview
========
This document describes how to isolate, examine, understand and resolve or
report performance issues like slow pages and hangs.
This document covers the general process for handling performance problems,
and outlines the major tools available for understanding them:
- **Multimeter** helps you understand sources of load and broad resource
utilization. This is a coarse, high-level tool.
- **DarkConsole** helps you dig into a specific slow page and understand
service calls. This is a general, mid-level tool.
- **XHProf** gives you detailed application performance profiles. This
is a fine-grained, low-level tool.
Performance and the Upstream
============================
Performance issues and hangs will often require upstream involvement to fully
-resolve. The intent is for Phabricator to perform well in all reasonable cases,
+resolve. The intent is for Phorge to perform well in all reasonable cases,
not require tuning for different workloads (as long as those workloads are
generally reasonable). Poor performance with a reasonable workload is likely a
bug, not a configuration problem.
-However, some pages are slow because Phabricator legitimately needs to do a lot
+However, some pages are slow because Phorge legitimately needs to do a lot
of work to generate them. For example, if you write a 100MB wiki document,
-Phabricator will need substantial time to process it, it will take a long time
+Phorge will need substantial time to process it, it will take a long time
to download over the network, and your browser will probably not be able to
render it especially quickly.
-We may be able to improve performance in some cases, but Phabricator is not
+We may be able to improve performance in some cases, but Phorge is not
magic and can not wish away real complexity. The best solution to these problems
is usually to find another way to solve your problem: for example, maybe the
100MB document can be split into several smaller documents.
Here are some examples of performance problems under reasonable workloads that
the upstream can help resolve:
- {icon check, color=green} Commenting on a file and mentioning that same
file results in a hang.
- {icon check, color=green} Creating a new user takes many seconds.
- {icon check, color=green} Loading Feed hangs on 32-bit systems.
The upstream will be less able to help resolve unusual workloads with high
inherent complexity, like these:
- {icon times, color=red} A 100MB wiki page takes a long time to render.
- {icon times, color=red} A Turing-complete simulation of Conway's Game of
Life implemented in 958,000 Herald rules executes slowly.
- {icon times, color=red} Uploading an 8GB file takes several minutes.
Generally, the path forward will be:
- Follow the instructions in this document to gain the best understanding of
the issue (and of how to reproduce it) that you can.
- In particular, is it being caused by an unusual workload (like a 100MB
wiki page)? If so, consider other ways to solve the problem.
- File a report with the upstream by following the instructions in
@{article:Contributing Bug Reports}.
The remaining sections in this document walk through these steps.
Understanding Performance Problems
==================================
To isolate, examine, and understand performance problems, follow these steps:
**General Slowness**: If you are experiencing generally poor performance, use
Multimeter to understand resource usage and look for load-based causes. See
@{article:Multimeter User Guide}. If that isn't fruitful, treat this like a
reproducible performance problem on an arbitrary page.
**Hangs**: If you are experiencing hangs (pages which never return, or which
time out with a fatal error after some number of seconds), they are almost always
the result of bugs in the upstream. Report them by following these
instructions:
- Set `debug.time-limit` to a value like `5` (see the sketch after this list).
- Reproduce the hang. The page should exit after 5 seconds with a more useful
stack trace.
- File a report with the reproduction instructions and the stack trace in
the upstream. See @{article:Contributing Bug Reports} for detailed
instructions.
- Clear `debug.time-limit` again to take your install out of debug mode.
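The first and last steps can be performed with `bin/config` (a sketch;
`bin/config delete` clears a key back to its default):
```
phorge/ $ ./bin/config set debug.time-limit 5
phorge/ $ ./bin/config delete debug.time-limit
```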
If the reproduction instructions include a step like "Create a 100MB wiki page",
the upstream may be less sympathetic to your cause than if reproducing the
issue does not require an unusual, complex workload.
In some cases, the hang may really just be a very large amount of processing time.
If you're very excited about 100MB wiki pages and don't mind waiting many
minutes for them to render, you may be able to adjust `max_execution_time` in
your PHP configuration to allow the process enough time to complete, or adjust
settings in your webserver config to let it wait longer for results.
**DarkConsole**: If you have a reproducible performance problem (for example,
loading a specific page is very slow), you can enable DarkConsole (a builtin
debugging console) to examine page performance in detail.
The two most useful tabs in DarkConsole are the "Services" tab and the
"XHProf" tab.
The "Services" module allows you to examine service calls (network calls,
subprocesses, events, etc) and find slow queries, slow services, inefficient
query plans, and unnecessary calls. Broadly, you're looking for slow or
repeated service calls, or calls which don't make sense given what the page
should be doing.
After installing XHProf (see @{article:Using XHProf}) you'll gain access to the
"XHProf" tab, which is a full tracing profiler. You can use the "Profile Page"
button to generate a complete trace of where a page is spending time. When
reading a profile, you're looking for the overall use of time, and for anything
which sticks out as taking unreasonably long or not making sense.
See @{article:Using DarkConsole} for complete instructions on configuring
and using DarkConsole.
**AJAX Requests**: To debug Ajax requests, activate DarkConsole and then turn
on the profiler or query analyzer on the main request by clicking the
appropriate button. The setting will cascade to Ajax requests made by the page
and they'll show up in the console with full query analysis or profiling
information.
**Command-Line Hangs**: If you have a script or daemon hanging, you can send
it `SIGHUP` to have it dump a stack trace to `sys_get_temp_dir()` (usually
`/tmp`).
Do this with:
```
$ kill -HUP <pid>
```
You can use this command to figure out where the system's temporary directory
is:
```
$ php -r 'echo sys_get_temp_dir()."\n";'
```
On most systems, this is `/tmp`. The trace should appear in that directory with
a name like `phabricator_backtrace_<pid>`. Examining this trace may provide
a key to understanding the problem.
**Command-Line Performance**: If you have general performance issues with
command-line scripts, you can add `--trace` to see a service call log. This is
similar to the "Services" tab in DarkConsole. This may help identify issues.
After installing XHProf, you can also add `--xprofile <filename>` to emit a
detailed performance profile. You can `arc upload` these files and then view
them in XHProf from the web UI.
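For example, if the slow script were `bin/repository update` (a sketch;
substitute whatever script you are actually investigating, and any writable
path for the profile):
```
phorge/ $ ./bin/repository update rXYZ --trace
phorge/ $ ./bin/repository update rXYZ --xprofile /tmp/update.xhprof
phorge/ $ arc upload /tmp/update.xhprof
```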
Next Steps
==========
If you've done all you can to isolate and understand the problem you're
experiencing, report it to the upstream. Include as much relevant data as you
can, such as:
- reproduction instructions;
- traces from `debug.time-limit` for hangs;
- screenshots of service call logs from DarkConsole (review these carefully,
as they can sometimes contain sensitive information);
- traces from CLI scripts with `--trace`;
- traces from sending HUP to processes; and
- XHProf profile files from `--xprofile` or "Download .xhprof Profile" in
the web UI.
After collecting this information:
- follow the instructions in @{article:Contributing Bug Reports} to file
a report in the upstream.
diff --git a/src/docs/user/field/permanently_destroying_data.diviner b/src/docs/user/field/permanently_destroying_data.diviner
index 04907fc0be..8f5d667252 100644
--- a/src/docs/user/field/permanently_destroying_data.diviner
+++ b/src/docs/user/field/permanently_destroying_data.diviner
@@ -1,92 +1,92 @@
@title Permanently Destroying Data
@group fieldmanual
How to permanently destroy data and manage leaked secrets.
Overview
========
-Phabricator intentionally makes it difficult to permanently destroy data, but
+Phorge intentionally makes it difficult to permanently destroy data, but
provides a command-line tool for destroying objects if you're certain that
you want to destroy something.
**Disable vs Destroy**: Most kinds of objects can be disabled, deactivated,
closed, or archived. These operations place them in inactive states and
preserve their transaction history.
(NOTE) Disabling (rather than destroying) objects is strongly recommended
unless you have a very good reason to want to permanently destroy an object.
Destroying Data
===============
To permanently destroy an object, run this command from the command line:
```
-phabricator/ $ ./bin/remove destroy <object>
+phorge/ $ ./bin/remove destroy <object>
```
The `<object>` may be an object monogram or PHID. For instance, you can use
`@alice` to destroy a particular user, or `T123` to destroy a particular task.
(IMPORTANT) This operation is permanent and can not be undone.
CLI Access Required
===================
-In almost all cases, Phabricator requires operational access from the CLI to
+In almost all cases, Phorge requires operational access from the CLI to
permanently destroy data. One major reason for this requirement is that it
limits the reach of an attacker who compromises a privileged account.
The web UI is generally append-only and actions generally leave an audit
trail, usually in the transaction log. Thus, an attacker who compromises an
account but only gains access to the web UI usually can not do much permanent
damage and usually can not hide their actions or cover their tracks.
Another reason that destroying data is hard is simply that it's permanent and
can not be undone, so there's no way to recover from mistakes.
Leaked Secrets
==============
Sometimes you may want to destroy an object because it has leaked a secret,
like an API key or another credential. For example, an engineer might
accidentally send a change for review which includes a sensitive private key.
-No Phabricator command can rewind time, and once data is written to Phabricator
+No Phorge command can rewind time, and once data is written to Phorge
the cat is often out of the bag: it has often been transmitted to external
-systems which Phabricator can not interact with via email, webhooks, API calls,
+systems which Phorge can not interact with via email, webhooks, API calls,
repository mirroring, CDN caching, and so on. You can try to clean up the mess,
but you're generally already too late.
The `bin/remove destroy` command will make a reasonable attempt to completely
destroy objects, but this is just an attempt. It can not unsend email or uncall
the API, and no command can rewind time and undo a leak.
-**Revoking Credentials**: If Phabricator credentials were accidentally
+**Revoking Credentials**: If Phorge credentials were accidentally
disclosed, you can revoke them so they no longer function. See
@{article:Revoking Credentials} for more information.
Preventing Leaks
================
Because time can not be rewound, it is best to prevent sensitive data from
-leaking in the first place. Phabricator supports some technical measures that
+leaking in the first place. Phorge supports some technical measures that
can make it more difficult to accidentally disclose secrets:
**Differential Diff Herald Rules**: You can write "Differential Diff" rules
in Herald that reject diffs before they are written to disk by using the
"Block diff with message" action.
These rules can reject diffs based on affected file names or file content.
This is a coarse tool, but rejecting diffs which contain strings like
`BEGIN RSA PRIVATE KEY` may make it more difficult to accidentally disclose
certain secrets.
**Commit Content Herald Rules**: For hosted repositories, you can write
"Commit Hook: Commit Content" rules in Herald which reject pushes that contain
commits which match certain rules (like file name or file content rules).
diff --git a/src/docs/user/field/repository_hints.diviner b/src/docs/user/field/repository_hints.diviner
index d618fac4aa..9c50243a9a 100644
--- a/src/docs/user/field/repository_hints.diviner
+++ b/src/docs/user/field/repository_hints.diviner
@@ -1,134 +1,134 @@
@title Repository Hints and Rewriting Commits
@group fieldmanual
Dealing with rewrites of published repositories and other unusual problems.
Overview
========
-Some repositories have unusual commits. You can provide "hints" to Phabricator
+Some repositories have unusual commits. You can provide "hints" to Phorge
about these commits to improve behavior.
Supported hints are:
- **Rewritten Commits**: If you have rewritten the history of a published
repository, you can provide hints about the mapping from old commits to
new commits so it can redirect users who visit old pages to the proper
new pages.
- **Unreadable Commits**: If some commits are not readable (which is rare,
but can happen in some cases if they are generated with an external tool)
- you can provide hints so that Phabricator doesn't try to read them.
+ you can provide hints so that Phorge doesn't try to read them.
The remainder of this document explains how to create and remove hints, and how
to specify each type of hint.
Creating Hints
==============
To create hints, pipe a JSON list of hints to `bin/repository hint`:
```
-phabricator/ $ cat hints.json | ./bin/repository hint
+phorge/ $ cat hints.json | ./bin/repository hint
```
The hints should be a list of objects like this:
```lang=json
[
...
{
"repository": "XYZ",
"hint": "...",
"old": "abcdef1234abcdef1234abcdef1234abcdef1234",
"new": "..."
}
...
]
```
Each hint may have these keys:
- `repository`: A repository identifier (ID, PHID, callsign or short name).
- `hint`: The hint type, see below.
- `old`: The full identifier or commit hash of the commit you want to
provide a hint for.
- `new`: For hints which specify a new commit, the full identifier or commit
hash of the new commit.
See below for exactly how to specify each type of hint.
Removing Hints
==============
To remove a hint, create a hint of type `"none"`. This will remove any existing
hint.
For example, use a hint specification like this:
```lang=json
[
{
"repository": "XYZ",
"hint": "none",
"old": "abcdef1234abcdef1234abcdef1234abcdef1234"
}
]
```
-Phabricator won't treat commits without any hint specially.
+Phorge won't treat commits without any hint specially.
Hint: Rewritten Commits
=======================
The `"rewritten"` hint allows you to redirect old commits to new commits after
a rewrite of published history. You should normally avoid rewriting published
commits, but sometimes this is necessary: for example, if a repository has
become unwieldy because it contains large binaries, you may strip them from
history.
To provide this kind of hint, pass the `"old"` commit hash (from before the
rewrite) and the `"new"` commit hash (from after the rewrite).
For example, a hint might look like this:
```lang=json
[
{
"repository": "XYZ",
"hint": "rewritten",
"old": "abcdef1234abcdef1234abcdef1234abcdef1234",
"new": "098765ffaabbccdd4680098765ffaabbccdd4680"
}
]
```
-Phabricator will show users that the commit was rewritten in the web UI.
+Phorge will show users that the commit was rewritten in the web UI.
Hint: Unreadable Commits
========================
-The `"unreadable"` hint allows you to tell Phabricator that it should not
+The `"unreadable"` hint allows you to tell Phorge that it should not
bother trying to read the changes associated with a particular commit. In
some rare cases, repositories can contain commits which aren't readable
(for example, if they were created by external tools during an import or
merge process).
To provide this kind of hint, pass the `"old"` commit which is affected.
For example, a hint might look like this:
```lang=json
[
{
"repository": "XYZ",
"hint": "unreadable",
"old": "abcdef1234abcdef1234abcdef1234abcdef1234"
}
]
```
-Phabricator won't try to read, parse, import, or display the changes associated
+Phorge won't try to read, parse, import, or display the changes associated
with this commit.
diff --git a/src/docs/user/field/repository_imports.diviner b/src/docs/user/field/repository_imports.diviner
index 262874186c..c5fba58dd7 100644
--- a/src/docs/user/field/repository_imports.diviner
+++ b/src/docs/user/field/repository_imports.diviner
@@ -1,247 +1,247 @@
@title Troubleshooting Repository Imports
@group fieldmanual
Guide to troubleshooting repositories which import incompletely.
Overview
========
When you first import an external source code repository (or push new commits to
-a hosted repository), Phabricator imports those commits in the background.
+a hosted repository), Phorge imports those commits in the background.
While a repository is initially importing, some features won't work. While
individual commits are importing, some of their metadata won't be available in
the web UI.
Sometimes, the import process may hang or fail to complete. This document can
help you understand the import process and troubleshoot problems with it.
Understanding the Import Pipeline
=================================
-Phabricator first performs commit discovery on repositories. This examines
+Phorge first performs commit discovery on repositories. This examines
a repository and identifies all the commits in it at a very shallow level,
then creates stub objects for them. These stub objects primarily serve to
assign various internal IDs to each commit.
Commit discovery occurs in the update phase, and you can learn more about
updates in @{article:Diffusion User Guide: Repository Updates}.
After commits are discovered, background tasks are queued to actually import
commits. These tasks do things like look at commit messages, trigger mentions
and update related objects, cache changes, trigger Herald, publish feed stories
and email, and apply Owners rules. You can learn more about some of these steps
in @{article:Diffusion User Guide: Permanent Refs}.
Specifically, the import pipeline has four steps:
- **Message**: Parses the commit message and author metadata.
- **Change**: Caches the paths the commit affected.
- **Owners**: Runs Owners rules.
- **Herald**: Runs Herald rules and publishes notifications.
These steps run in sequence for each commit, but all discovered commits import
in parallel.
Identifying Missing Steps
=========================
There are a few major pieces of information you can look at to understand where
the import process is stuck.
First, to identify which commits have missing import steps, run this command:
```
-phabricator/ $ ./bin/repository importing rXYZ
+phorge/ $ ./bin/repository importing rXYZ
```
That will show what work remains to be done. Each line shows a commit which
is discovered but not imported, and the import steps that are remaining for
that commit. Generally, the commit is stuck on the first step in the list.
Second, load the Daemon Console (at `/daemon/` in the web UI). This will show
what work is currently being done and waiting to complete. The most important
sections are "Queued Tasks" (work waiting in queue) and "Leased Tasks"
(work currently being done).
Third, run this command to look at the daemon logs:
```
-phabricator/ $ ./bin/phd log
+phorge/ $ ./bin/phd log
```
This can show you any errors the daemons have encountered recently.
The next sections will walk through how to use this information to understand
and resolve the issue.
Handling Permanent Failures
===========================
Some commits can not be imported, which will permanently stop a repository from
fully importing. These are rare, but can be caused by unusual data in a
repository, version peculiarities, or bugs in the importer.
Permanent failures usually look like a small number of commits stuck on the
"Message" or "Change" steps in the output of `repository importing`. If you
have a larger number of commits, it is less likely that there are any permanent
problems.
In the Daemon console, permanent failures usually look like a small number of
tasks in "Leased Tasks" with a large failure count. These tasks are retrying
until they succeed, but a bug is permanently preventing them from succeeding,
so they'll rack up a large number of retries over time.
In the daemon log, these commits usually emit specific errors showing why
they're failing to import.
These failures are the easiest to identify and understand, and can often be
resolved quickly. Choose some failing commit from the output of `bin/repository
importing` and use this command to re-run any missing steps manually in the
foreground:
```
-phabricator/ $ ./bin/repository reparse --importing --trace rXYZabcdef012...
+phorge/ $ ./bin/repository reparse --importing --trace rXYZabcdef012...
```
This command is always safe to run, no matter what the actual root cause of
the problem is.
If this fails with an error, you've likely identified a problem with
-Phabricator. Collect as much information as you can about what makes the commit
+Phorge. Collect as much information as you can about what makes the commit
special and file a bug in the upstream by following the instructions in
@{article:Contributing Bug Reports}.
If the commit imports cleanly, this is more likely to be caused by some other
issue.
Handling Temporary Failures
===========================
Some commits may temporarily fail to import: perhaps the network or services
may have briefly been down, or some configuration wasn't quite right, or the
daemons were killed halfway through the work.
These commits will retry eventually and usually succeed, but some of the retry
time limits are very conservative (up to 24 hours) and you might not want to
wait that long.
In the Daemon console, temporary failures usually look like tasks in the
"Leased Tasks" column with a large "Expires" value but a low "Failures" count
-(usually 0 or 1). The "Expires" column is showing how long Phabricator is
+(usually 0 or 1). The "Expires" column is showing how long Phorge is
waiting to retry these tasks.
In the daemon log, these temporary failures might have created log entries, but
might also not have. For example, a failure rooted in a network issue will
probably create a log entry, but a failure caused by the daemons being abruptly
killed may not.
You can follow the instructions from "Handling Permanent Failures" above to
reparse steps individually to look for an error that represents a root cause,
but sometimes this can happen because of some transient issue which won't be
identifiable.
The easiest way to fix this is to restart the daemons. When you restart
daemons, all task leases are immediately expired, so any tasks waiting for a
long time will run right away:
```
-phabricator/ $ ./bin/phd restart
+phorge/ $ ./bin/phd restart
```
This command is always safe to run, no matter what the actual root cause of
the problem is.
After restarting the daemons, any pending tasks should be able to retry
immediately.
For more information on managing the daemons, see
@{article:Managing Daemons with phd}.
Forced Parsing
==============
In rare cases, the actual tasks may be lost from the task queue. Usually, they
have been stolen by gremlins or spirited away by ghosts, or someone may have
been too ambitious with running manual SQL commands and deleted a bunch of
extra things they shouldn't have.
There is no normal set of conditions under which this should occur, but you can
-force Phabricator to re-queue the tasks to recover from it if it does occur.
+force Phorge to re-queue the tasks to recover from it if it does occur.
This will look like missing steps in `repository importing`, but nothing in the
"Queued Tasks" or "Leased Tasks" sections of the daemon console. The daemon
logs will also be empty, since the tasks have vanished.
To re-queue parse tasks for a repository, run this command, which will queue
up all of the missing work in `repository importing`:
```
-phabricator/ $ ./bin/repository reparse --importing --all rXYZ
+phorge/ $ ./bin/repository reparse --importing --all rXYZ
```
This command may cause duplicate work to occur if you have misdiagnosed the
problem and the tasks aren't actually lost. For example, it could queue a
-second task to perform publishing, which could cause Phabricator to send a
+second task to perform publishing, which could cause Phorge to send a
second copy of email about the commit. Other than that, it is safe to run even
if this isn't the problem.
After running this command, you should see tasks in "Queued Tasks" and "Leased
Tasks" in the console which correspond to the commits in `repository
importing`, and progress should resume.
Forced Imports
==============
In some cases, you might want to force a repository to be flagged as imported
even though the import isn't complete. The most common and reasonable case
where you might want to do this is if you've identified a permanent failure
with a small number of commits (maybe just one) and reported it upstream, and
are waiting for a fix. You might want to start using the repository immediately,
even if a few things can't import yet.
You should be cautious about doing this. The "importing" flag controls
publishing of notifications and email, so if you flag a repository as imported
but it still has a lot of work queued, it may send an enormous amount of email
as that work completes.
To mark a repository as imported even though it really isn't, run this
command:
```
-phabricator/ $ ./bin/repository mark-imported rXYZ
+phorge/ $ ./bin/repository mark-imported rXYZ
```
If you do this by mistake, you can reverse it later by using the
`--mark-not-imported` flag.
General Tips
============
Broadly, `bin/repository` contains several useful debugging commands which
let you figure out where failures are occurring. You can add the `--trace` flag
to any command to get more details about what it is doing. For any command,
you can use `help` to learn more about what it does and which flags it takes:
```
-phabricator/ $ bin/repository help <command>
+phorge/ $ bin/repository help <command>
```
In particular, you can use flags with the `repository reparse` command to
manually run parse steps in the foreground, including re-running steps and
running steps out of order.
Next Steps
==========
Continue by:
- returning to the @{article:Diffusion User Guide}.
diff --git a/src/docs/user/field/restarting.diviner b/src/docs/user/field/restarting.diviner
index 5bdd92da78..638e1f1490 100644
--- a/src/docs/user/field/restarting.diviner
+++ b/src/docs/user/field/restarting.diviner
@@ -1,116 +1,116 @@
-@title Restarting Phabricator
+@title Restarting Phorge
@group fieldmanual
Instructions on how to restart HTTP and PHP servers to reload configuration
-changes in Phabricator.
+changes in Phorge.
Overview
========
-Phabricator's setup and configuration instructions sometimes require you to
+Phorge's setup and configuration instructions sometimes require you to
restart your server processes, particularly after making configuration changes.
This document explains how to restart them properly.
In general, you need to restart both whatever is serving HTTP requests and
whatever is serving PHP requests. In some cases, these will be the same process
and handled with one restart command. In other cases, they will be two
different processes and handled with two different restart commands.
{icon exclamation-circle color=blue} If you have two different processes (for
example, nginx and PHP-FPM), you need to issue two different restart commands.
It's important to restart both your HTTP server and PHP server because each
server caches different configuration and settings. Restarting both servers
after making changes ensures you're running up-to-date configuration.
To restart properly:
- Identify which HTTP server you are running (for example, Apache or nginx).
- Identify which PHP server you are running (for example, mod_php or PHP-FPM).
- For each server, follow the instructions below to restart it.
- If the instructions tell you to do so, make sure you restart **both**
servers!
Quick Start
===========
**Apache**: If you use Apache with `mod_php`, you can just restart Apache. You
do not need to restart `mod_php` separately. See below for instructions on how
to do this if you aren't sure. This is a very common configuration.
**nginx**: If you use nginx with PHP-FPM, you need to restart both nginx and
PHP-FPM. See below for instructions on how to do this if you aren't sure. This
is also a very common configuration.
It's possible to use Apache or nginx in other configurations, or a different
webserver. Consult the documentation for your system or webserver if you aren't
sure how things are set up.
Universal Restart
=================
If you are having trouble properly restarting processes on your server, try
turning it off and on again. This is effective on every known system and
under all configurations.
HTTP Server: Apache
===================
If you are using Apache with `mod_php`, you only need to restart Apache.
If you are using Apache in FastCGI mode, you need to restart both Apache and
the FCGI server (usually PHP-FPM). This is very unusual.
The correct method for restarting Apache depends on what system you are
running. Consult your system documentation for details. You might use a command
like one of these on your system, or a different command:
```
$ sudo apachectl restart
$ sudo /etc/init.d/httpd restart
$ sudo service apache2 restart
```
HTTP Server: Nginx
==================
If you're using Nginx with PHP-FPM, you need to restart both of them. This is
the most common Nginx configuration.
The correct method for restarting Nginx depends on what system you are running.
Consult your system documentation for details. You might use a command like
one of these on your system, or a different command:
```
$ sudo /etc/init.d/nginx restart
$ sudo service nginx restart
```
PHP Server: mod_php
===================
This is a builtin PHP server that runs within Apache. Restarting Apache (see
above) is sufficient to restart it. There is no separate restart command for
`mod_php`, so you don't need to do anything else.
PHP Server: PHP-FPM
===================
If you're using FastCGI mode, PHP-FPM is the most common PHP FastCGI server.
You'll need to restart it if you're running it.
The correct method for restarting PHP-FPM depends on what system you are
running. Consult your system documentation for details. You might use a command
like one of these on your system, or a different command:
```
$ sudo /etc/init.d/php-fpm restart
$ sudo service php5-fpm reload
```
diff --git a/src/docs/user/field/revoking_credentials.diviner b/src/docs/user/field/revoking_credentials.diviner
index b1e18bcd97..9d3c046fbe 100644
--- a/src/docs/user/field/revoking_credentials.diviner
+++ b/src/docs/user/field/revoking_credentials.diviner
@@ -1,101 +1,101 @@
@title Revoking Credentials
@group fieldmanual
Revoking credentials, tokens, and sessions.
Overview
========
If you've become aware of a security breach that affects you, you may want to
revoke or cycle credentials in case anything was leaked.
You can revoke credentials with the `bin/auth revoke` tool. This document
describes how to use the tool and how revocation works.
bin/auth revoke
===============
The `bin/auth revoke` tool revokes specified sets of credentials from
specified targets. For example, if you believe `@alice` may have had her SSH
key compromised, you can revoke her keys like this:
```
-phabricator/ $ ./bin/auth revoke --type ssh --from @alice
+phorge/ $ ./bin/auth revoke --type ssh --from @alice
```
The flag `--everything` revokes all credential types.
The flag `--everywhere` revokes credentials from all objects. For most
credential types this means "all users", but some credentials (like SSH keys)
can also be associated with other kinds of objects.
Note that revocation can be disruptive (users must choose new passwords,
generate new API tokens, configure new SSH keys, etc) and can not be easily
undone if you perform an excessively broad revocation.
You can use the `--list` flag to get a list of available credential types
which can be revoked. This includes upstream credential types, and may include
third-party credential types if you have extensions installed.
To list all revokable credential types:
```
-phabricator/ $ ./bin/auth revoke --list
+phorge/ $ ./bin/auth revoke --list
```
To get details about exactly how a specific revoker works:
```
-phabricator/ $ ./bin/auth revoke --list --type ssh
+phorge/ $ ./bin/auth revoke --list --type ssh
```
Revocation vs Removal
=====================
Generally, `bin/auth revoke` **revokes** credentials, rather than just deleting
or removing them. That is, the credentials are moved to a permanent revocation
list of invalid credentials.
For example, revoking an SSH key prevents users from adding that key back to
their account: they must generate and add a new, unique key. Likewise, revoked
passwords can not be reused.
Although it is technically possible to reinstate credentials by removing them
from revocation lists, there are no tools available for this and you should
treat revocation lists as permanent.
Scenarios
=========
**Network Compromise**: If you believe you may have been affected by a network
compromise (where an attacker may have observed data transmitted over the
network), you should revoke the `password`, `conduit`, `session`, and
`temporary` credentials for all users. This will revoke all credentials which
are normally sent over the network.
You can revoke these credentials by running these commands:
```
-phabricator/ $ ./bin/auth revoke --type password --everywhere
-phabricator/ $ ./bin/auth revoke --type conduit --everywhere
-phabricator/ $ ./bin/auth revoke --type session --everywhere
-phabricator/ $ ./bin/auth revoke --type temporary --everywhere
+phorge/ $ ./bin/auth revoke --type password --everywhere
+phorge/ $ ./bin/auth revoke --type conduit --everywhere
+phorge/ $ ./bin/auth revoke --type session --everywhere
+phorge/ $ ./bin/auth revoke --type temporary --everywhere
```
Depending on the nature of the compromise you may also consider revoking `ssh`
credentials, although these are usually not sent over the network because
they are asymmetric.
**User Compromise**: If you believe a user's credentials have been compromised
(for example, maybe they lost a phone or laptop) you should revoke
`--everything` from their account. This will revoke all of their outstanding
credentials without affecting other users.
You can revoke all credentials for a user by running this command:
```
-phabricator/ $ ./bin/auth revoke --everything --from @alice
+phorge/ $ ./bin/auth revoke --everything --from @alice
```
diff --git a/src/docs/user/field/worker_queue.diviner b/src/docs/user/field/worker_queue.diviner
index b88a2ea40b..d53b0771de 100644
--- a/src/docs/user/field/worker_queue.diviner
+++ b/src/docs/user/field/worker_queue.diviner
@@ -1,83 +1,83 @@
@title Managing the Worker Queue
@group fieldmanual
Advanced guide to managing the background worker task queue.
Overview
========
-Phabricator uses daemonized worker processes to execute some tasks (like
+Phorge uses daemonized worker processes to execute some tasks (like
importing repositories and sending mail) in the background.
In most cases, this queue will automatically execute tasks in an appropriate
order. However, in some cases you may want to exercise greater control over
which tasks execute, when, and at what priority.
Reference: Priority Levels
==========================
-Tasks queued by Phabricator use these default priority levels:
+Tasks queued by Phorge use these default priority levels:
| Priority | Name | Tasks |
|---|---|---|
| 1000 | `ALERTS` | Time-sensitive notifications and email. |
| 2000 | `DEFAULT` | Normal publishing and processing. |
| 2500 | `COMMIT` | Import of commits in existing repositories. |
| 3000 | `BULK` | Edits applied via "Bulk Edit" interface. |
| 3500 | `INDEX` | Search engine index updates. |
| 4000 | `IMPORT` | Import of commits in new repositories. |
Tasks with smaller priority numbers execute before tasks with larger priority
numbers (for example, a task with priority 1000 will execute before a task
with priority 2000).
Any positive integer is a valid priority level, and if you adjust the priority
of tasks with `bin/worker priority` you may select any level even if
-Phabricator would never naturally queue tasks at that level. For example, you
+Phorge would never naturally queue tasks at that level. For example, you
may adjust tasks to priority `5678`, which will make them execute after all
other types of natural tasks.
Although tasks usually execute in priority order, task execution order is not
strictly a function of priority, and task priority does not guarantee execution
order.
Large Repository Imports
========================
The most common case where you may want to make an adjustment to the default
behavior of the worker queue is when importing a very large repository like
the Linux kernel.
-Although Phabricator will automatically process imports of new repositories at
+Although Phorge will automatically process imports of new repositories at
a lower priority level than all other non-import tasks, you may still run into
issues like these:
- You may also want to import one or more //other// new repositories, and
would prefer they import at a higher priority.
- You may find overall repository performance is impacted by the large
repository import.
You can manually change the priority of tasks with `bin/worker priority`. For
example, if your copy of the Linux repository is `R123` and you'd like it to
import at a lower priority than all other tasks (including other imports of
new repositories), you can run a command like this:
```
-phabricator/ $ ./bin/worker priority --priority 5000 --container R123
+phorge/ $ ./bin/worker priority --priority 5000 --container R123
```
This means: set all tasks associated with container `R123` (in this example,
the Linux repository) to priority 5000 (which is lower than any natural
priority).
You can delay tasks until later with `bin/worker delay`, which allows you to
schedule tasks to execute at night or over the weekend. For example, to
pause an import for 6 hours, run a command like this:
```
-phabricator/ $ ./bin/worker delay --until "6 hours" --container R123
+phorge/ $ ./bin/worker delay --until "6 hours" --container R123
```
The selected tasks will not execute until 6 hours from the time this command
is issued. You can also provide an explicit date, or "now" to let tasks begin
execution immediately.
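For instance, the same flag accepts other date expressions. As a rough sketch (assuming the parser accepts ordinary `strtotime()`-style strings; the exact formats supported may vary), you could push work to the weekend or release it immediately:
```
phorge/ $ ./bin/worker delay --until "Saturday 02:00" --container R123
phorge/ $ ./bin/worker delay --until "now" --container R123
```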
diff --git a/src/docs/user/field/xhprof.diviner b/src/docs/user/field/xhprof.diviner
index ad6fe82cb8..a1cfd36558 100644
--- a/src/docs/user/field/xhprof.diviner
+++ b/src/docs/user/field/xhprof.diviner
@@ -1,122 +1,122 @@
@title Using XHProf
@group fieldmanual
Describes how to install and use XHProf, a PHP profiling tool.
Overview
========
XHProf is a profiling tool which will let you understand application
-performance in Phabricator.
+performance in Phorge.
After you install XHProf, you can use it from the web UI and the CLI to
generate detailed performance profiles. It is the most powerful tool available
for understanding application performance and identifying and fixing slow code.
Installing XHProf
=================
You are likely to have the most luck building XHProf from source:
$ git clone https://github.com/phacility/xhprof.git
From any source distribution of the extension, build and install it like this:
$ cd xhprof/
$ cd extension/
$ phpize
$ ./configure
$ make
$ sudo make install
You may also need to add `extension=xhprof.so` to your php.ini.
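For example, on systems that load extensions from a `conf.d`-style directory (this path is an assumption and differs between distributions), enabling the extension might look like this:
```
$ echo "extension=xhprof.so" | sudo tee /etc/php.d/xhprof.ini
$ sudo systemctl restart php-fpm   # or restart Apache, depending on your setup
```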
You can also try using PECL to install it, but this may not work well with
recent versions of PHP:
$ pecl install xhprof
Once you've installed it, `php -i` should report it as installed (you may
see a different version number, which is fine):
$ php -i | grep xhprof
...
xhprof => 0.9.2
...
Using XHProf: Web UI
====================
To profile a web page, activate DarkConsole and navigate to the XHProf tab.
Use the **Profile Page** button to generate a profile.
For instructions on activating DarkConsole, see @{article:Using DarkConsole}.
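If you administer the install, DarkConsole can typically be enabled install-wide from the CLI (the `darkconsole.enabled` key is the upstream setting name; you still need to turn the console on in your personal settings):
```
phorge/ $ ./bin/config set darkconsole.enabled true
```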
Using XHProf: CLI
=================
From the command line, use the `--xprofile <filename>` flag to generate a
profile of any script.
-You can then upload this file to Phabricator (using `arc upload` may be easiest)
+You can then upload this file to Phorge (using `arc upload` may be easiest)
and view it in the web UI.
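For example, to profile a maintenance script and upload the result (the specific script and output path here are only placeholders):
```
phorge/ $ ./bin/storage status --xprofile /tmp/status.xhprof
phorge/ $ arc upload /tmp/status.xhprof
```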
Analyzing Profiles
==================
Understanding profiles is as much art as science, so be warned that you may not
make much headway. Even if you aren't able to conclusively read a profile
yourself, you can attach profiles when submitting bug reports to the upstream
and we can look at them. This may yield new insight.
When looking at profiles, the "Wall Time (Inclusive)" column is usually the
most important. This shows the total amount of time spent in a function or
method and all of its children. Usually, to improve the performance of a page,
we're trying to find something that's slow and make it not slow: this column
can help identify which things are slowest.
The "Wall Time (Exclusive)" column shows time spent in a function or method,
excluding time spent in its children. This can give you a hint about whether the
call itself is slow or it's just making calls to other things that are slow.
You can also get a sense of this by clicking a call to see its children, and
seeing if the bulk of runtime is spent in a child call. This tends to indicate
that you're looking at a problem which is deeper in the stack, and you need
to go down further to identify and understand it.
Conversely, if the "Wall Time (Exclusive)" column is large, or the children
of a call are all cheap, there's probably something expensive happening in the
call itself.
The "Count" column can also sometimes tip you off that something is amiss, if
a method which shouldn't be called very often is being called a lot.
Some general things to look for -- these aren't smoking guns, but are unusual
and can lead to finding a performance issue:
- Is a low-level utility method like `phutil_utf8ize()` or `array_merge()`
taking more than a few percent of the page runtime?
- Do any methods (especially high-level methods) have >10,000 calls?
- Are we spending more than 100ms doing anything which isn't loading data
or rendering data?
- Does anything look suspiciously expensive or out of place?
- Is the profile for the slow page a lot different than the profile for a
fast page?
Some performance problems are obvious and will jump out of a profile; others
may require a more nuanced understanding of the codebase to sniff out which
parts are suspicious. If you aren't able to make progress with a profile,
report the issue upstream and attach the profile to your report.
Next Steps
==========
Continue by:
- enabling DarkConsole with @{article:Using DarkConsole}; or
- understanding and reporting performance problems with
@{article:Troubleshooting Performance Problems}.
diff --git a/src/docs/user/installation_guide.diviner b/src/docs/user/installation_guide.diviner
index 269b20b62a..0073dc6afb 100644
--- a/src/docs/user/installation_guide.diviner
+++ b/src/docs/user/installation_guide.diviner
@@ -1,148 +1,148 @@
@title Installation Guide
@group intro
-This document contains basic install instructions to get Phabricator up and
+This document contains basic install instructions to get Phorge up and
running.
Overview
========
-Phabricator is a LAMP (Linux, Apache, MySQL, PHP) application. To install
-Phabricator, you will need:
+Phorge is a LAMP (Linux, Apache, MySQL, PHP) application. To install
+Phorge, you will need:
- a normal computer to install it on (shared hosts and unusual environments
are not supported) running some flavor of Linux or a similar OS;
- - a domain name (like `phabricator.mycompany.com`);
+ - a domain name (like `phorge.example.com`);
- basic sysadmin skills;
- Apache, nginx, or another webserver;
- PHP, MySQL, and Git.
The remainder of this document details these requirements.
Installation Requirements
=========================
You will need **a computer**. Options include:
- **A Normal Computer**: This is strongly recommended. Many installs use a VM
- in EC2. Phabricator installs properly and works well on a normal computer.
+ in EC2. Phorge installs properly and works well on a normal computer.
- **A Shared Host**: This may work, but is not recommended. Many shared
- hosting environments have restrictions which prevent some of Phabricator's
+ hosting environments have restrictions which prevent some of Phorge's
features from working. Consider using a normal computer instead. We do not
support shared hosts.
- **A SAN Appliance, Network Router, Gaming Console, Raspberry Pi, etc.**:
- Although you may be able to install Phabricator on specialized hardware, it
+ Although you may be able to install Phorge on specialized hardware, it
is unlikely to work well and will be difficult for us to support. Strongly
consider using a normal computer instead. We do not support specialized
hardware.
- **A Toaster, Car, Firearm, Thermostat, etc.**: Yes, many modern devices now
have embedded computing capability. We live in interesting times. However,
- you should not install Phabricator on these devices. Instead, install it on
+ you should not install Phorge on these devices. Instead, install it on
a normal computer. We do not support installing on noncomputing devices.
-To install the Phabricator server software, you will need an **operating
+To install the Phorge server software, you will need an **operating
system** on your normal computer which is **not Windows**. Note that the
command line interface //does// work on Windows, and you can //use//
-Phabricator from any operating system with a web browser. However, the server
+Phorge from any operating system with a web browser. However, the server
software does not run on Windows. It does run on most other operating systems,
so choose one of these instead:
- **Linux**: Most installs use Linux.
- **Mac OS X**: Mac OS X is an acceptable flavor of Linux.
- **FreeBSD**: While FreeBSD is certainly not a flavor of Linux, it is a fine
- operating system possessed of many desirable qualities, and Phabricator will
+ operating system possessed of many desirable qualities, and Phorge will
install and run properly on FreeBSD.
- **Solaris, etc.**: Other systems which look like Linux and quack like Linux
will generally work fine, although we may suffer a reduced ability to
support and resolve issues on unusual operating systems.
Beyond an operating system, you will need **a webserver**.
- **Apache**: Many installs use Apache + `mod_php`.
- **nginx**: Many installs use nginx + `php-fpm`.
- **lighttpd**: `lighttpd` is less popular than Apache or nginx, but it
works fine.
- **Other**: Other webservers which can run PHP are also likely to work fine,
although these installation instructions will not cover how to set them up.
- - **PHP Builtin Server**: Phabricator will not work with the builtin
- webserver because Phabricator depends on making requests to itself on some
+ - **PHP Builtin Server**: Phorge will not work with the builtin
+ webserver because Phorge depends on making requests to itself on some
workflows, and the builtin webserver is single-threaded.
You will also need:
- **MySQL**: You need MySQL. We strongly recommend MySQL 5.5 or newer.
- **PHP**: You need PHP 5.5 or newer.
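If you aren't sure which versions are already installed, a quick check looks like this (binary names can vary by system):
```
$ php --version
$ mysql --version
$ git --version
```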
You'll probably also need a **domain name**. In particular, you should read this
note:
-NOTE: Phabricator must be installed on an entire domain. You can not install it
-to a path on an existing domain, like `example.com/phabricator/`. Instead,
-install it to an entire domain or subdomain, like `phabricator.example.com`.
+NOTE: Phorge must be installed on an entire domain. You can not install it
+to a path on an existing domain, like `example.com/phorge/`. Instead,
+install it to an entire domain or subdomain, like `phorge.example.com`.
Level Requirements
==================
-To install and administrate Phabricator, you'll need to be comfortable with
+To install and administrate Phorge, you'll need to be comfortable with
common system administration skills. For example, you should be familiar with
using the command line, installing software on your operating system of choice,
working with the filesystem, managing processes, dealing with permissions,
editing configuration files, and setting environment variables.
If you aren't comfortable with these skills, you can still try to perform an
install. The install documentation will attempt to guide you through what you
need to know. However, if you aren't very familiar or comfortable with using
this set of skills to troubleshoot and resolve problems, you may encounter
issues which you have substantial difficulty working through.
-We assume users installing and administrating Phabricator are comfortable with
+We assume users installing and administrating Phorge are comfortable with
common system administration skills and concepts. If you aren't, proceed at
your own risk and expect that your skills may be tested.
Installing Required Components
==============================
If you are installing on Ubuntu or a RedHat derivative, there are install
scripts available which should handle most of the things discussed in this
document for you:
- **RedHat Derivatives**:
- [[ https://secure.phabricator.com/diffusion/P/browse/master/scripts/install/install_rhel-derivs.sh
+ [[ https://we.phorge.it/diffusion/P/browse/master/scripts/install/install_rhel-derivs.sh
| install_rhel-derivs.sh ]]
- **Ubuntu**:
- [[ https://secure.phabricator.com/diffusion/P/browse/master/scripts/install/install_ubuntu.sh
+ [[ https://we.phorge.it/diffusion/P/browse/master/scripts/install/install_ubuntu.sh
| install_ubuntu.sh ]]
If those work for you, you can skip directly to the
@{article:Configuration Guide}. These scripts are also available in the
`scripts/install` directory in the project itself.
Otherwise, here's a general description of what you need to install:
- git (usually called "git" in package management systems)
- Apache (usually "httpd" or "apache2") (or nginx)
- MySQL Server (usually "mysqld" or "mysql-server")
- PHP (usually "php")
- Required PHP extensions: mbstring, iconv, mysql (or mysqli), curl, pcntl
(these might be something like "php-mysql" or "php5-mysqlnd")
- Optional PHP extensions: gd
If you already have LAMP setup, you've probably already got everything you need.
It may also be helpful to refer to the install scripts above, even if they don't
work for your system.
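One quick way to sanity-check that the required PHP extensions are actually loaded (a sketch only; module names reported by PHP may differ slightly from package names):
```
$ php -m | grep -iE 'mbstring|iconv|mysql|curl|pcntl|gd'
```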
-Now that you have all that stuff installed, grab Phabricator and its
+Now that you have all that stuff installed, grab Phorge and its
dependencies:
$ cd somewhere/ # pick some install directory
- somewhere/ $ git clone https://github.com/phacility/arcanist.git
- somewhere/ $ git clone https://github.com/phacility/phabricator.git
+ somewhere/ $ git clone https://we.phorge.it/source/arcanist.git
+ somewhere/ $ git clone https://we.phorge.it/source/phorge.git
Next Steps
==========
Continue by:
- - configuring Phabricator with the @{article:Configuration Guide}; or
- - learning how to keep Phabricator up to date with
- @{article:Upgrading Phabricator}.
+ - configuring Phorge with the @{article:Configuration Guide}; or
+ - learning how to keep Phorge up to date with
+ @{article:Upgrading Phorge}.
diff --git a/src/docs/user/introduction.diviner b/src/docs/user/introduction.diviner
index cbd739fc8d..a591aae160 100644
--- a/src/docs/user/introduction.diviner
+++ b/src/docs/user/introduction.diviner
@@ -1,43 +1,44 @@
@title Introduction
@group intro
-This document provides a high-level overview of the Phabricator project.
+This document provides a high-level overview of the Phorge project.
-= What is Phabricator? =
+= What is Phorge? =
-**Phabricator** (pronounced like the word //fabricator//) is a suite of web
+**Phorge** (pronounced like the word //forge//) is a suite of web
applications which make it easier to build software, particularly when working
-with teams. Phabricator is largely based on Facebook's internal tools.
+with teams. Phorge is a fork of Phabricator, which in turn is largely based on
+Facebook's internal tools.
-The major components of Phabricator are:
+The major components of Phorge are:
- **Differential**, a code review tool; and
- **Diffusion**, a repository browser; and
- **Maniphest**, a bug tracker; and
- **Phriction**, a wiki.
-Phabricator also includes a number of smaller tools.
+Phorge also includes a number of smaller tools.
-= Why use Phabricator? =
+= Why use Phorge? =
-Phabricator gives you a box of solid tools for a comparatively small setup cost.
+Phorge gives you a box of solid tools for a comparatively small setup cost.
The tools all work together and are richly integrated. The whole thing is free
-and open source. You own all your data. Phabricator is extremely fast and proven
+and open source. You own all your data. Phorge is extremely fast and proven
to scale both to large datasets (Facebook has 500,000+ commits across many
repositories) and large organizations (Facebook has 500+ fulltime engineers).
-Phabricator's tools are easy to learn, understand, and use.
+Phorge's tools are easy to learn, understand, and use.
-However, Phabricator may also not be a good solution for you:
+However, Phorge may also not be a good solution for you:
- If you develop primarily on Windows, you are likely to find integration
with the toolsets you use lacking.
- If you don't use SVN, Git or Mercurial, you'll have to add support for your
VCS before you can get anywhere.
- If you loathe PHP, well, it's written in PHP. Sorry. It is a victim of
circumstances. We assert it is well-written PHP, at least.
= Next Steps =
Continue by:
- - installing Phabricator with the @{article:Installation Guide}.
+ - installing Phorge with the @{article:Installation Guide}.
diff --git a/src/docs/user/reporting_security.diviner b/src/docs/user/reporting_security.diviner
index c74fc40e1a..98293964b1 100644
--- a/src/docs/user/reporting_security.diviner
+++ b/src/docs/user/reporting_security.diviner
@@ -1,36 +1,17 @@
@title Reporting Security Vulnerabilities
@group intro
-Describes how to report security vulnerabilities in Phabricator.
+Describes how to report security vulnerabilities in Phorge.
Overview
========
-Phabricator runs a disclosure and award program through
-[[ https://www.hackerone.com/ | HackerOne ]]. This program is the best way to
-submit security issues to us, and awards responsible disclosure of
-vulnerabilities with cash bounties. You can find our project page
-here:
+Phorge accepts bug reports on the upstream install. Please use the
+[[https://we.phorge.it/maniphest/task/edit/form/1/ | security reporting form]]
+to report security vulnerabilities.
-(NOTE) https://hackerone.com/phabricator
+If you aren't sure if something qualifies, you can submit the issue as a normal
+bug report. For instructions, see @{article:Contributing Bug Reports}.
-The project page has detailed information about the scope of the program and
-how to participate.
-
-We have a 24 hour response timeline, and are usually able to respond to (and,
-very often, fix) issues more quickly than that.
-
-
-Other Channels
-==============
-
-If you aren't sure if something qualifies or don't want to report via
-HackerOne, you can submit the issue as a normal bug report. For instructions,
-see @{article:Contributing Bug Reports}.
-
-
-Get Updated
-===========
-
-General information about security changes is reported weekly in the
-[[ https://secure.phabricator.com/w/changelog/ | Changelog ]].
+General information about security changes is reported in the
+[[ https://we.phorge.it/w/changelog/ | Changelog ]].
diff --git a/src/docs/user/support.diviner b/src/docs/user/support.diviner
index 20c4b241f0..a4d7cbafca 100644
--- a/src/docs/user/support.diviner
+++ b/src/docs/user/support.diviner
@@ -1,5 +1,36 @@
@title Support Resources
@short Support
@group intro
-Effective June 1, 2021: Phabricator is no longer actively supported.
+Resources for reporting bugs, requesting features, and getting support.
+
+Overview
+========
+
+The upstream provides free support for a range of problems.
+
+
+Reporting Security Vulnerabilities
+==================================
+
+The upstream accepts, fixes, and awards bounties for reports of material
+security issues with the software.
+
+To report security issues, see @{article:Reporting Security Vulnerabilities}.
+
+
+Reporting Bugs
+==============
+
+The upstream will accept **reproducible** bug reports in modern, first-party
+production code running in reasonable environments. Before submitting a bug
+report you **must update** to the latest version of Phorge.
+
+To report bugs, see @{article:Contributing Bug Reports}.
+
+
+Contributing
+============
+
+If you'd like to contribute to Phorge, start with
+@{article:Contributor Introduction}.
diff --git a/src/docs/user/upgrading.diviner b/src/docs/user/upgrading.diviner
index 705b38e4ee..4b11317dee 100644
--- a/src/docs/user/upgrading.diviner
+++ b/src/docs/user/upgrading.diviner
@@ -1,127 +1,124 @@
-@title Upgrading Phabricator
+@title Upgrading Phorge
@group intro
-This document contains instructions for keeping Phabricator up to date.
+This document contains instructions for keeping Phorge up to date.
Overview
========
-Phabricator is under active development, and new features are released
+Phorge is under active development, and new features are released
continuously. Staying up to date will keep your install secure.
We recommend installs upgrade regularly (every 1-2 weeks). Upgrades usually go
smoothly and complete in a few minutes. If you put off upgrades for a long
time, it may take a lot more work to bring things up to date if you want access
to a useful new feature or an important security change.
Staying On Top of Changes
=========================
-We release a weekly [[https://secure.phabricator.com/w/changelog | Changelog]],
-which describes changes in the previous week. You can look at the changelogs
+We release a [[https://we.phorge.it/w/changelog | Changelog]],
+which describes changes over time. You can look at the changelogs
for an idea of what new features are available, upcoming changes, security
information, and warnings about compatibility issues or migrations.
Stable Branch
=============
-You can either run the `master` or `stable` branch of Phabricator. The `stable`
-branch is run in the [[ https://phacility.com | Phacility Cluster ]], and lags
-about a week behind `master`.
+You can either run the `master` or `stable` branch of Phorge. The `stable`
+branch is a little more stable than `master`, and may be helpful if you
+administrate a larger install.
-The `stable` branch is a little more stable than `master`, and may be helpful
-if you administrate a larger install.
-
-We promote `master` to `stable` about once a week, then publish the changelog
-and deploy the cluster. During the week, major bugfixes are cherry-picked to
-the `stable` branch. The changelog lists the `stable` hashes for that week,
-as well as any fixes which were cherry-picked.
+We promote `master` to `stable` frequently, then publish the changelog. Between
+promotions, major bugfixes are cherry-picked to the `stable` branch. The
+changelog lists the current `stable` hashes, as well as any fixes which were
+cherry-picked.
To switch to `stable`, check the branch out in each working copy:
- phabricator/ $ git checkout stable
+ phorge/ $ git checkout stable
arcanist/ $ git checkout stable
You can now follow the upgrade process normally.
Upgrade Process
===============
-IMPORTANT: You **MUST** restart Phabricator after upgrading. For help, see
-@{article:Restarting Phabricator}.
+IMPORTANT: You **MUST** restart Phorge after upgrading. For help, see
+@{article:Restarting Phorge}.
-IMPORTANT: You **MUST** upgrade `arcanist` and `phabricator` at the same time.
+IMPORTANT: You **MUST** upgrade `arcanist` and `phorge` at the same time.
-Phabricator runs on many different systems, with many different webservers.
+Phorge runs on many different systems, with many different webservers.
Given this diversity, we don't currently maintain a comprehensive upgrade
script which can work on any system. However, the general steps are the same
on every system:
- Stop the webserver (including `php-fpm`, if you use it).
- - Stop the daemons, with `phabricator/bin/phd stop`.
- - Run `git pull` in `arcanist/` and `phabricator/`.
- - Run `phabricator/bin/storage upgrade`.
- - Start the daemons, with `phabricator/bin/phd start`.
+ - Stop the daemons, with `phorge/bin/phd stop`.
+ - Run `git pull` in `arcanist/` and `phorge/`.
+ - Run `phorge/bin/storage upgrade`.
+ - Start the daemons, with `phorge/bin/phd start`.
- Restart the webserver (and `php-fpm`, if you stopped it earlier).
For more discussion of these steps, see @{article:Configuration Guide}.
-This template script roughly outlines the steps required to upgrade Phabricator.
+This template script roughly outlines the steps required to upgrade Phorge.
You'll need to adjust paths and commands a bit for your particular system:
```lang=sh
#!/bin/sh
set -e
set -x
-# This is an example script for updating Phabricator, similar to the one used to
-# update <https://secure.phabricator.com/>. It might not work perfectly on your
+# This is an example script for updating Phorge, similar to the one used to
+# update <https://we.phorge.it/>. It might not work perfectly on your
# system, but hopefully it should be easy to adapt. This script is not intended
# to work without modifications.
# NOTE: This script assumes you are running it from a directory which contains
-# arcanist/, and phabricator/.
+# arcanist/, and phorge/.
ROOT=`pwd` # You can hard-code the path here instead.
### STOP WEB SERVER AND DAEMONS ###############################################
# Stop daemons.
-$ROOT/phabricator/bin/phd stop
+$ROOT/phorge/bin/phd stop
# If running the notification server, stop it.
-# $ROOT/phabricator/bin/aphlict stop
+# $ROOT/phorge/bin/aphlict stop
# Stop the webserver (apache, nginx, lighttpd, etc). This command will differ
# depending on which system and webserver you are running: replace it with an
# appropriate command for your system.
# NOTE: If you're running php-fpm, you should stop it here too.
sudo /etc/init.d/httpd stop
### UPDATE WORKING COPIES ######################################################
cd $ROOT/arcanist
git pull
-cd $ROOT/phabricator
+cd $ROOT/phorge
git pull
# Upgrade the database schema. You may want to add the "--force" flag to allow
# this script to run noninteractively.
-$ROOT/phabricator/bin/storage upgrade
+$ROOT/phorge/bin/storage upgrade
# Restart the webserver. As above, this depends on your system and webserver.
# NOTE: If you're running php-fpm, restart it here too.
sudo /etc/init.d/httpd start
# Restart daemons.
-$ROOT/phabricator/bin/phd start
+$ROOT/phorge/bin/phd start
# If running the notification server, start it.
-# $ROOT/phabricator/bin/aphlict start
+# $ROOT/phorge/bin/aphlict start
```
diff --git a/src/docs/user/userguide/almanac.diviner b/src/docs/user/userguide/almanac.diviner
index 9250724c03..5b13a9f80a 100644
--- a/src/docs/user/userguide/almanac.diviner
+++ b/src/docs/user/userguide/almanac.diviner
@@ -1,182 +1,182 @@
@title Almanac User Guide
@group userguide
Using Almanac to manage devices and services.
Overview
========
Almanac is a device and service inventory application. It allows you to create
lists of //devices// and //services// that humans and other applications can
use to keep track of what is running where.
Almanac is an infrastructure application that will normally be used by
-administrators to configure advanced Phabricator features. In most cases,
+administrators to configure advanced Phorge features. In most cases,
normal users will very rarely interact with Almanac directly.
At a very high level, Almanac can be thought of as a bit like a DNS server.
Callers ask it for information about services, and it responds with details
about which devices host those services. However, it can respond to a broader
range of queries and provide more detailed responses than DNS alone can.
-Today, the primary use cases for Almanac are internal to Phabricator:
+Today, the primary use cases for Almanac are internal to Phorge:
- Providing a list of build servers to Drydock so it can run build and
integration tasks.
- - Configuring Phabricator to operate in a cluster setup.
+ - Configuring Phorge to operate in a cluster setup.
Beyond internal uses, Almanac is a general-purpose service and device inventory
application and can be used to configure and manage other types of service and
hardware inventories, but these use cases are currently considered experimental
and you should exercise caution in pursuing them.
Example: Drydock Build Pool
================================
Here's a quick example of how you might configure Almanac to solve a real-world
problem. This section describes configuration at a high level to give you an
introduction to Almanac concepts and a better idea of how the pieces fit
together.
In this scenario, we want to use Drydock to run some sort of build process. To
do this, Drydock needs hardware to run on. We're going to use Almanac to give
Drydock a list of hosts it should use.
In this scenario, Almanac will work a bit like a DNS server. When we're done,
Drydock will be able to query Almanac for information about a service (like
`build.mycompany.com`) and get back information about which hosts are part of
that service and which addresses/ports it should connect to.
Before getting started, we need to create a **network**. For simplicity, let's
suppose everything will be connected through the public internet. If you
haven't already, you'd create a "Public Internet" network first.
Once we have a network, we create the actual physical or virtual hosts by
launching instances in EC2, or racking and powering on some servers, or already
having some hardware on hand we want to use. We set the hosts up normally and
connect them to the internet (or another network).
After the hosts exist, we add them to Almanac as **devices**, like
`build001.mycompany.com`, `build002.mycompany.com`, and so on. In Almanac,
devices are usually physical or virtual hosts, although you could also use it
to inventory other types of devices and hardware.
For each **device**, we add an **interface**. This is just an address and port
on a particular network. Since we're going to connect to these hosts over
SSH, we'll add interfaces on the standard SSH port 22. An example configuration
might look a little bit like this:
| Device | Network | Address | Port |
|--------|---------|---------|------|
| `build001.mycompany.com` | Public Internet | 58.8.9.10 | 22
| `build002.mycompany.com` | Public Internet | 58.8.9.11 | 22
| ... | Public Internet | ... | 22
Now, we create the **service**. This is what we'll tell Drydock about, and
it can query for information about this service to find connected devices.
Here, we'll call it `build.mycompany.com`.
After creating the service, add **bindings** to the interfaces we configured
above. This will tell Drydock where it should actually connect to.
Once this is complete, we're done in Almanac and can continue configuration in
Drydock, which is outside the scope of this example. Once everything is fully
configured, this is how Almanac will be used by Drydock:
- Drydock will query information about `build.mycompany.com` from Almanac.
- Drydock will get back a list of bound interfaces, among other data.
- The interfaces provide information about addresses and ports that Drydock
can use to connect to the actual devices.
You can now add and remove devices to the pool by binding them and unbinding
them from the service.
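Applications aren't the only consumers: you can inspect a service yourself over Conduit. As a sketch, assuming your install exposes the standard `almanac.service.search` method and your `arc` is pointed at the install (invocation syntax varies slightly between Arcanist versions):
```
$ echo '{"constraints": {"names": ["build.mycompany.com"]}}' | \
    arc call-conduit --conduit-uri https://phorge.example.com/ almanac.service.search
```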
Concepts
========
The major concepts in Almanac are **devices**, **interfaces**, **services**,
**bindings**, **networks**, and **namespaces**.
**Devices**: Almanac devices represent physical or virtual devices.
Usually, they are hosts (like `web001.mycompany.net`), although you could
use devices to keep inventory of any other kind of device or physical asset
(like phones, laptops, or office chairs).
Each device has a name, and may have properties and interfaces.
**Interfaces**: Interfaces are listening address/port combinations on devices.
For example, if you have a webserver host device named `web001.mycompany.net`,
you might add an interface on port `80`.
Interfaces tell users and applications where they should connect to access
services and devices.
**Services**: These are named services like `build.mycompany.net` that work
a bit like DNS. Humans or other applications can look up a service to find
configuration information and learn which devices are hosting the service.
Each service has a name, and may have properties and bindings.
**Bindings**: Bindings are connections between services and interfaces. They
tell callers which devices host a named service.
**Networks**: Networks allow Almanac to distinguish between addresses on
different networks, like VPNs vs the public internet.
If you have hosts in different VPNs or on private networks, you might have
multiple devices which share the same IP address (like `10.0.0.3`). Networks
allow Almanac to distinguish between devices with the same address on different
sections of the network.
**Namespaces**: Namespaces let you control who is permitted to create devices
and services with particular names. For example, the namespace `mycompany.com`
controls who can create services with names like `a.mycompany.com` and
`b.mycompany.com`.
Namespaces
==========
Almanac namespaces allow you to control who can create services and devices
with certain names.
If you keep a list of cattle as devices with names like
`cow001.herd.myranch.moo`, `cow002.herd.myranch.moo`, you might have some
applications which query for all devices in `*.herd.myranch.moo`, and thus
want to limit who can create devices there in order to prevent mistakes.
If a namespace like `herd.myranch.moo` exists, users must have permission to
edit the namespace in order to create new services, devices, or namespaces
within it. For example, a user can not create `cow003.herd.myranch.moo` if
they do not have edit permission on the `herd.myranch.moo` namespace.
When you try to create a `cow003.herd.myranch.moo` service (or rename an
existing service to have that name), Almanac looks for these namespaces, then
checks the policy of the first one it finds:
| Namespace | |
|---|---|
| `cow003.herd.myranch.moo` | //"Nearest" namespace, considered first.//
| `herd.myranch.moo` | |
| `myranch.moo` | |
| `moo` | //"Farthest" namespace, considered last.//
Note that namespaces treat names as lists of domain parts, not as strict
substrings, so the namespace `herd.myranch.moo` does not prevent
someone from creating `goatherd.myranch.moo` or `goat001.goatherd.myranch.moo`.
The name `goatherd.myranch.moo` is not part of the `herd.myranch.moo` namespace
because the initial subdomain differs.
If a name belongs to multiple namespaces, the policy of the nearest namespace
is controlling. For example, if `myranch.moo` has a very restrictive edit
policy but `shed.myranch.moo` has a more open one, users can create devices and
services like `rake.shed.myranch.moo` as long as they can pass the policy check
for `shed.myranch.moo`, even if they do not have permission under the policy
for `myranch.moo`.
Users can edit services and devices within a namespace if they have edit
permission on the service or device itself, as long as they don't try to rename
the service or device to move it into a namespace they don't have permission
to access.
diff --git a/src/docs/user/userguide/amazon_rds.diviner b/src/docs/user/userguide/amazon_rds.diviner
index 442d499013..aab2058140 100644
--- a/src/docs/user/userguide/amazon_rds.diviner
+++ b/src/docs/user/userguide/amazon_rds.diviner
@@ -1,27 +1,27 @@
@title User Guide: Amazon RDS
@group config
Discusses using Amazon RDS as a database.
Overview
========
-Phabricator works with Amazon RDS. However, most of our documentation and setup
+Phorge works with Amazon RDS. However, most of our documentation and setup
checks assume you are using local MySQL, and upstream support is less available
for RDS.
If you use RDS, you'll need to do a few things a bit differently than you would
with local MySQL, especially when configuring RDS. This document describes some
of the differences you'll encounter when using RDS.
Configuration
=============
The documentation and various setup warnings will sometimes direct you to make
configuration changes in `my.cnf`. In Amazon RDS, you don't have direct access
to `my.cnf` and thus can not make these changes in the configuration file.
Instead, you can use [[ http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html | DB Parameter Groups ]].
You can access these from your AWS console and use the web interface to make
necessary changes. The web UI will give you a user-friendly key-value table:
just identify the option you need to change, then select a new value for it.
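If you manage AWS from the command line instead of the console, the same kind of change can be made with the AWS CLI. This is only a sketch: the parameter group name and the particular parameter are placeholders, not values Phorge requires.
```
$ aws rds modify-db-parameter-group \
    --db-parameter-group-name phorge-mysql \
    --parameters "ParameterName=sql_mode,ParameterValue=STRICT_ALL_TABLES,ApplyMethod=immediate"
```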
diff --git a/src/docs/user/userguide/arcanist.diviner b/src/docs/user/userguide/arcanist.diviner
index 0de18a9358..943c8a2193 100644
--- a/src/docs/user/userguide/arcanist.diviner
+++ b/src/docs/user/userguide/arcanist.diviner
@@ -1,172 +1,172 @@
@title Arcanist User Guide
@group userguide
-Guide to Arcanist, a command-line interface to Phabricator.
+Guide to Arcanist, a command-line interface to Phorge.
-Arcanist provides command-line access to many Phabricator tools (like
+Arcanist provides command-line access to many Phorge tools (like
Differential, Files, and Paste), integrates with static analysis ("lint") and
unit tests, and manages common workflows like getting changes into Differential
for review.
A detailed command reference is available by running `arc help`. This
document provides an overview of common workflows and installation.
Arcanist has technical, contributor-focused documentation here:
-<https://secure.phabricator.com/book/arcanist/>
+<https://we.phorge.it/book/arcanist/>
= Quick Start =
A quick start guide is available at @{article:Arcanist Quick Start}. It provides
a much more compact summary of how to get `arc` set up and running for a new
project. You may want to start there, and return here if you need more
information.
= Overview =
Arcanist is a wrapper script that sits on top of other tools (e.g.,
Differential, linters, unit test frameworks, git, Mercurial, and SVN) and
provides a simple command-line API to manage code review and some related
revision control operations.
For a detailed list of all available commands, run:
$ arc help
For detailed information about a specific command, run:
$ arc help <command>
Arcanist allows you to do things like:
- get detailed help about available commands with `arc help`
- send your code to Differential for review with `arc diff` (for detailed
instructions, see @{article:Arcanist User Guide: arc diff})
- show pending revision information with `arc list`
- find likely reviewers for a change with `arc cover`
- apply changes in a revision to the working copy with `arc patch`
- download a patch from Differential with `arc export`
- update Git commit messages after review with `arc amend`
- commit SVN changes with `arc commit`
- push Git and Mercurial changes with `arc land`
- view enhanced information about Git branches with `arc branch`
Once you've configured lint and unit test integration, you can also:
- check your code for syntax and style errors with `arc lint`
(see @{article:Arcanist User Guide: Lint})
- run unit tests that cover your changes with `arc unit`
Arcanist integrates with other tools:
- upload and download files with `arc upload` and `arc download`
- create and view pastes with `arc paste`
Arcanist has some advanced features as well; you can:
- execute Conduit method calls with `arc call-conduit`
- create or update libphutil libraries with `arc liberate`
- activate tab completion with `arc shell-complete`
- ...or extend Arcanist and add new commands.
Except where otherwise noted, these workflows are generally agnostic to the
underlying version control system and will work properly in git, Mercurial, or
SVN repositories.
= Installing Arcanist =
Arcanist is meant to be installed on your local machine or development server --
whatever machine you're editing code on. It runs on:
- Linux;
- Other operating systems which are pretty similar to Linux, or which
Linux is pretty similar to;
- FreeBSD, a fine operating system held in great esteem by many;
- Mac OS X (see @{article:Arcanist User Guide: Mac OS X}); and
- Windows (see @{article:Arcanist User Guide: Windows}).
Arcanist is written in PHP, so you need to install the PHP CLI first if you
don't already have it. Arcanist should run on PHP 5.2 and newer. If you don't
have PHP installed, you can download it from <http://www.php.net/>.
To install Arcanist, pick an install directory and clone the code from GitHub:
some_install_path/ $ git clone https://github.com/phacility/arcanist.git
Now add `some_install_path/arcanist/bin/` to your PATH environment variable.
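For example, if you use `bash`, you might append something like this to your shell profile (adjust the path to wherever you cloned Arcanist):
```
$ echo 'export PATH="$PATH:/some_install_path/arcanist/bin"' >> ~/.bashrc
$ source ~/.bashrc
```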
When you type "arc", you should see something like this:
Usage Exception: No command provided. Try 'arc help'.
If you get that far, you've done things correctly. If you get an error or have
trouble getting this far, see these detailed guides:
- On Windows: @{article:Arcanist User Guide: Windows}
- On Mac OS X: @{article:Arcanist User Guide: Mac OS X}
You can later upgrade Arcanist to the latest version with `arc upgrade`:
$ arc upgrade
== Installing Arcanist for a Team ==
Arcanist changes quickly, so it can be something of a headache to get it
installed and keep people up to date. Here are some approaches you might be
able to use:
- Facebook does most development on development servers, which have a standard
environment and NFS mounts. Arcanist lives on an
NFS mount, and the default `.bashrc` adds it to the PATH. Updating the
mount source updates everyone's versions, and new employees have a working
`arc` when they first log in.
- Another common approach is to write an install script as an action into
existing build scripts, so users can run `make install-arc` or
`ant install-arc` or similar.
== Installing Tab Completion ==
If you use `bash`, you can set up tab completion by running this command:
$ arc shell-complete
This will install shell completion into your current shell. After installing,
you may need to start a new shell (or open a new terminal window) to pick up
the updated configuration.
== Configuration ==
Some Arcanist commands can be configured. This configuration is read from
three sources, in order:
# A project can specify configuration in an `.arcconfig` file. This file is
JSON, and can be updated using `arc set-config --local` or by editing
it manually.
# User configuration is read from `~/.arcconfig`. This file is JSON, and can
be updated using `arc set-config`.
# Host configuration is read from `/etc/arcconfig` (on Windows, the path
- is `C:\ProgramData\Phabricator\Arcanist\config`).
+ is `C:\ProgramData\Phorge\Arcanist\config`).
Arcanist uses the first definition it encounters as the runtime setting.
Existing settings can be printed with `arc get-config`.
Use `arc help set-config` and `arc help get-config` for more information
about reading and writing configuration.
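For example, to store a personal setting and confirm it took effect (the `editor` key is just an illustration):
```
$ arc set-config editor "vim"
$ arc get-config editor
```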
== Next Steps ==
Continue by:
- setting up a new project for use with `arc`, with
@{article:Arcanist User Guide: Configuring a New Project}; or
- learning how to use `arc` to send changes for review with
@{article:Arcanist User Guide: arc diff}.
Advanced topics are also available. These are detailed guides to configuring
technical features of `arc` that refine its behavior. You do not need to read
them to get it working.
- @{article:Arcanist User Guide: Commit Ranges}
- @{article:Arcanist User Guide: Lint}
- @{article:Arcanist User Guide: Customizing Existing Linters}
- @{article:Arcanist User Guide: Customizing Lint, Unit Tests and Workflows}
- @{article:Arcanist User Guide: Code Coverage}
diff --git a/src/docs/user/userguide/arcanist_coverage.diviner b/src/docs/user/userguide/arcanist_coverage.diviner
index cb25c0cc74..b1b449a7db 100644
--- a/src/docs/user/userguide/arcanist_coverage.diviner
+++ b/src/docs/user/userguide/arcanist_coverage.diviner
@@ -1,69 +1,69 @@
@title Arcanist User Guide: Code Coverage
@group userguide
-Explains code coverage features in Arcanist and Phabricator.
+Explains code coverage features in Arcanist and Phorge.
This is a configuration guide that helps you set up advanced features. If you're
just getting started, you don't need to look at this yet. Instead, start with
the @{article:Arcanist User Guide}.
Before you can configure coverage features, you must set up unit test
integration. For instructions, see @{article:Arcanist User Guide: Configuring
a New Project} and @{article:Arcanist User Guide: Customizing
Lint, Unit Tests and Workflows}.
= Using Coverage Features =
If your project has unit tests with coverage integration (see below for
instructions on setting it up), you can use "arc" to show coverage reports.
For example:
arc unit --detailed-coverage src/some/file.php
Depending on how your test engine is configured, this will run tests relevant
to `src/some/file.php` and give you a detailed coverage report.
If the test engine enables coverage by default, it will be uploaded to
Differential and displayed in the right gutter when viewing diffs.
-= Enabling Coverage for Arcanist and Phabricator =
+= Enabling Coverage for Arcanist and Phorge =
-If you're contributing, Arcanist and Phabricator support coverage if
+If you're contributing, Arcanist and Phorge support coverage if
you install Xdebug:
http://xdebug.org/
It should be sufficient to correctly install Xdebug; coverage information will
be automatically enabled.
= Building Coverage Support =
To add coverage support to a unit test engine, just call `setCoverage()` when
building @{class@arcanist:ArcanistUnitTestResult} objects. Provide a map of
file names (relative to the working copy root) to coverage report strings.
Coverage report strings look like this:
NNNNNCCCNNNNNNNNCCCCCCNNNUUUNNNNN
Each line in the file is represented by a character. Valid characters are:
- **N** Not executable. This is a comment or whitespace which should be
ignored when computing test coverage.
- **C** Covered. This line has test coverage.
- **U** Uncovered. This line is executable but has no test coverage.
- **X** Unreachable. If your coverage analysis can detect unreachable code,
you can report it here.
This format is intended to be as simple as possible. A valid coverage result
might look like this:
array(
'src/example.php' => 'NNCNNNCNUNNNUNUNUNUNUNC',
'src/other.php' => 'NNUNNNUNCNNNUNUNCNCNCNU',
);
You may also want to filter coverage information to the paths passed to the
unit test engine. See @{class@arcanist:PhutilTestCase} and
@{class@arcanist:PhutilUnitTestEngine} for an example of coverage integration
in PHP using Xdebug.
diff --git a/src/docs/user/userguide/arcanist_diff.diviner b/src/docs/user/userguide/arcanist_diff.diviner
index 4140deb537..7d5a0161d9 100644
--- a/src/docs/user/userguide/arcanist_diff.diviner
+++ b/src/docs/user/userguide/arcanist_diff.diviner
@@ -1,208 +1,208 @@
@title Arcanist User Guide: arc diff
@group userguide
Guide to running `arc diff`, to send changes to Differential for review.
This article assumes you have `arc` installed and running; if not, see
@{article:Arcanist User Guide} for help getting it set up.
Before running `arc diff`, you should create a `.arcconfig` file. If someone
set things up for you, they may already have done this. See
@{article:Arcanist User Guide: Configuring a New Project} for instructions and
information.
= Overview =
While `arc` has a large number of commands that interface with various
-Phabricator applications, the primary use of `arc` is to send changes for
+Phorge applications, the primary use of `arc` is to send changes for
review in Differential (for more information on Differential, see
@{article:Differential User Guide}). If you aren't familiar with Differential,
it may be instructive to read that article first to understand the big picture
of how the code review workflow works.
You send changes for review by running `arc diff`. The rest of this document
explains how to use `arc diff`, and how the entire review workflow operates for
different version control systems.
= Subversion =
In Subversion, `arc diff` sends the **uncommitted changes in the working copy**
for review.
To **create a revision** in SVN:
$ nano source_code.c # Make changes.
$ arc diff
This will prompt you for information about the revision. To later **update an
existing revision**, just do the same thing:
$ nano source_code.c # Make more changes.
$ arc diff
This time, `arc` will prompt you to update the revision. Once your revision has
been accepted, you can commit it like this:
$ arc commit
= Git =
In Git, `arc diff` sends **all commits in a range** for review. By default,
this range is:
`git merge-base origin/master HEAD`..HEAD
That's a fancy way of saying "all the commits on the current branch that
you haven't pushed yet". So, to **create a revision** in Git, run:
$ nano source_code.c # Make changes.
$ git commit -a # Commit changes.
$ arc diff # Creates a new revision out of ALL unpushed commits on
# this branch.
The `git commit` step is optional. If there are uncommitted changes in the
working copy then Arcanist will ask you to create a commit from them.
Since it uses **all** the commits on the branch, you can make several commits
before sending your changes for review if you prefer.
You can specify a different commit range instead by running:
$ arc diff <commit>
This means to use the range:
`git merge-base <commit> HEAD`..HEAD
However, this is a relatively advanced feature. The default is usually correct
if you aren't creating branches-on-branches, juggling remotes, etc.
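If you want to preview exactly which commits `arc diff` will include before running it, you can evaluate the same range with plain Git:
```
$ git log --oneline $(git merge-base origin/master HEAD)..HEAD
```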
To **update a revision**, just do the same thing:
$ nano source_code.c # Make more changes.
$ git commit -a # Commit them.
$ arc diff # This prompts you to update revision information.
The `git commit` step is optional. If there are uncommitted changes in the
working copy then Arcanist will ask you to amend them to the commit.
When your revision has been accepted, you can usually push it like this:
$ arc land <branch> # Merges <branch> into master and pushes.
`arc land` makes some assumptions about your workflow which might not be
true. Consult the documentation before you use it. You should also look at
`arc amend`, which may fit your workflow better.
= Mercurial =
In Mercurial, `arc diff` sends **all commits in a range** for review. By
default, this range is changes between the first non-outgoing parent of any
revision in history and the directory state. This is a fancy way of saying
"every outgoing change since the last merge". It includes any uncommitted
changes in the working copy, although you will be prompted to include these.
To **create a revision** in Mercurial, run:
$ nano source_code.c # Make changes.
$ hg commit # Commit changes.
$ arc diff # Creates a new revision out of ALL outgoing commits
# on this branch since the last merge.
The `hg commit` step is optional. If there are uncommitted changes in the
working copy then Arcanist will ask you to create a commit from them.
Since it uses **all** the outgoing commits on the branch, you can make several
commits before sending your changes for review if you prefer.
You can specify a different commit range instead by running:
$ arc diff <commit>
This means to use the range from that commit to the directory state. However,
this is an advanced feature and the default is usually correct.
To **update a revision**, just do the same thing:
$ nano source_code.c # Make changes.
$ hg commit # Commit changes.
$ arc diff # This prompts you to update revision information.
The `hg commit` step is optional. If there are uncommitted changes in the
working copy then Arcanist will ask you to create a commit from them (or amend
them to the previous commit if supported).
When your revision has been accepted, push it normally. (`arc` does not have
push integration in Mercurial because it can't force merges and thus can't
guarantee it will be able to do anything useful.)
= Pushing and Closing Revisions =
After changes have been accepted, you generally push them and close the
revision. `arc` has several workflows which help with this, by:
- squashing or merging changes from a feature branch into a master branch
(if relevant);
- formatting a good commit message with all the information from Differential;
and
- automatically closing the revision.
You don't need to use any of these workflows: you can just run `git push`,
`hg push` or `svn commit` and then manually close the revision from the web.
However, these workflows can make common development strategies more convenient,
and give you better commit messages in the repository. The workflows `arc`
supports are:
- `arc land`: Works in Git if you develop in feature branches. Does a merge
or squash-merge from your feature branch into some master branch, provides
a detailed commit message, pushes master, and then deletes your branch.
- `arc amend`: Works in Git if you can't use `arc land`. Amends HEAD with
a detailed commit message.
- `arc commit`: Works in Subversion. Runs `svn commit` with a detailed commit
message.
- `arc close-revision`: Works anywhere, closes a revision from the CLI
without going through the web UI.
You can use `arc help <command>` for detailed help with any of these.
Differential will make a guess about a next step on accepted revisions, but it
may not be the best next step for your workflow.
-Phabricator will also automatically close revisions if the changes are pushed
+Phorge will also automatically close revisions if the changes are pushed
to a repository that is tracked in Diffusion. Specifically, it will close
revisions based on commit and tree hashes, and `Differential Revision`
identifiers in commit messages.
If you push to an untracked repository (or `arc` can't figure out that it's
tracked), `arc land`, `arc amend` and `arc commit` will implicitly run
`arc close-revision`.
= General Information =
This information is not unique to a specific version control system.
== Force Diff Only ==
You can create just a diff (rather than a revision) with `--preview` (or
`--only`, but this disables other features). You can later use it to create
or update a revision from the web UI.
== Other Diff Sources ==
You can create a diff out of an arbitrary patch file by using `--raw` and piping
it to stdin. In most cases this will only create a diff, not a revision. You
can use the web UI to create a revision from the diff, or update an existing
revision.
== Force Create / Update ==
`arc` uses information about the working copy (like the path, branch name, local
commit hashes, and local tree hashes, depending on which version control system
you are using) to figure out whether you intend to create or update a revision.
If it guesses incorrectly, you can force it to either create or update a
revision with:
$ arc diff --create # Force "create".
$ arc diff --update <revision> # Force "update".
You can figure out what `arc` believes to be in the working copy with
`arc which`.
diff --git a/src/docs/user/userguide/arcanist_lint_unit.diviner b/src/docs/user/userguide/arcanist_lint_unit.diviner
index 6ecc9aaa3e..dc7f336fef 100644
--- a/src/docs/user/userguide/arcanist_lint_unit.diviner
+++ b/src/docs/user/userguide/arcanist_lint_unit.diviner
@@ -1,92 +1,92 @@
@title Arcanist User Guide: Customizing Lint, Unit Tests and Workflows
@group userguide
Explains how to build new classes to control how Arcanist behaves.
This is a configuration guide that helps you set up advanced features. If you're
just getting started, you don't need to look at this yet. Instead, start with
the @{article:Arcanist User Guide}.
= Overview =
Arcanist has some basic configuration options available in the `.arcconfig`
file (see @{article:Arcanist User Guide: Configuring a New Project}), but it
can't handle everything. If you want to customize Arcanist at a deeper level,
you need to build new classes. For instance:
- if you want to configure linters, or add new linters, you need to create a
new class which extends @{class@arcanist:ArcanistLintEngine}.
- if you want to integrate with a unit testing framework, you need to create a
new class which extends @{class@arcanist:ArcanistUnitTestEngine}.
- if you want to change how workflows behave, or add new workflows, you
need to create a new class which extends
@{class@arcanist:ArcanistConfiguration}.
Arcanist works through a sort of dependency-injection approach. For example,
Arcanist does not run lint rules by default, but you can set `lint.engine`
in your `.arcconfig` to the name of a class which extends
@{class@arcanist:ArcanistLintEngine}. When running from inside your project,
Arcanist will load this class and call methods on it in order to run lint. To
make this work, you need to do three things:
- actually write the class;
- add the library where the class exists to your `.arcconfig`;
- add the class name to your `.arcconfig` as the **lint.engine**,
**unit.engine**, or **arcanist_configuration**.
= Create a libphutil Library =
If you haven't created a library for the class to live in yet, you need to do
that first. Follow the instructions in
-@{article@phabcontrib:Adding New Classes}, then make the library loadable by
+@{article@contrib:Adding New Classes}, then make the library loadable by
adding it to your `.arcconfig` like this:
{
// ...
"load" : [
// ...
"/path/to/my/library", // Absolute path
"support/arcanist", // Relative path in this project
// ...
]
// ...
}
You can either specify an absolute path, or a path relative to the project root.
When you run `arc list --trace`, you should see a message to the effect that
it has loaded your library.
For debugging or testing, you can also run Arcanist with the
`--load-phutil-library` flag:
arc --load-phutil-library=/path/to/library <command>
You can specify this flag more than once to load several libraries. Note that
if you use this flag, Arcanist will ignore any libraries listed in
`.arcconfig`.
= Use the Class =
This step is easy: just edit `.arcconfig` to specify your class name as
the appropriate configuration value.
{
// ...
"lint.engine" : "CustomArcanistLintEngine",
// ...
}
Now, when you run Arcanist in your project, it will invoke your class when
appropriate.
For lint and unit tests, you can also use the `--engine` flag to override the
default engine:
arc lint --engine MyCustomArcanistLintEngine
This is mostly useful for debugging and testing.
= Next Steps =
- Learn how to reuse existing linters by reading
@{article:Arcanist User Guide: Customizing Existing Linters}.
diff --git a/src/docs/user/userguide/arcanist_new_project.diviner b/src/docs/user/userguide/arcanist_new_project.diviner
index a8e8e49202..86e57c0c86 100644
--- a/src/docs/user/userguide/arcanist_new_project.diviner
+++ b/src/docs/user/userguide/arcanist_new_project.diviner
@@ -1,223 +1,223 @@
@title Arcanist User Guide: Configuring a New Project
@group userguide
Explains how to configure Arcanist projects with `.arcconfig` files.
= Overview =
In most cases, you should be able to use `arc` without specifically configuring
your project for it. If you want to adjust `arc` behaviors, you can create a
`.arcconfig` file in your project to provide project-specific settings.
= .arcconfig Basics =
An `.arcconfig` file is a JSON file which you check into your project's root.
Arcanist uses `.arcconfig` files to customize a number of things about its
behavior. The first thing you're likely to want to configure is the URI
-for your Phabricator install. A simple, valid file looks something like this:
+for your Phorge install. A simple, valid file looks something like this:
name=.arcconfig
{
- "phabricator.uri" : "https://phabricator.example.com/"
+ "phabricator.uri" : "https://phorge.example.com/"
}
For details on available options, see below.
NOTE: You should commit your `.arcconfig` file! It contains project
configuration, not user configuration.
= Advanced .arcconfig =
Common options are:
- - **phabricator.uri**: the URI for the Phabricator install that `arc` should
+ - **phabricator.uri**: the URI for the Phorge install that `arc` should
connect to when run in this project. This option was previously called
`conduit_uri`.
- **repository.callsign**: The callsign of this repository in Diffusion.
Normally, `arc` can detect this automatically, but if it can't figure it out
you can specify it explicitly. Use `arc which` to understand the detection
process.
- **history.immutable**: Configures `arc` to use workflows which never rewrite
history in the working copy. By default, `arc` will perform some rewriting
of unpublished history (amending commit messages, squash merging) on some
workflows in Git. The distinctions are covered in detail below.
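For example, a project that sets the first two options might have an
`.arcconfig` like this (the URI and callsign below are placeholders):
  {
    "phabricator.uri" : "https://phorge.example.com/",
    "repository.callsign" : "EXAMPLE"
  }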
Other options include:
- **load**: list of additional Phutil libraries to load at startup.
See below for details about path resolution, or see
- @{article@phabcontrib:Adding New Classes} for a general introduction to
+ @{article@contrib:Adding New Classes} for a general introduction to
libphutil libraries.
- **https.cabundle**: specifies the path to an alternate certificate bundle
for use when making HTTPS connections.
- **lint.engine**: the name of a subclass of
@{class@arcanist:ArcanistLintEngine}, which should be used to apply lint
rules to this project. See @{article:Arcanist User Guide: Lint}.
- **unit.engine**: the name of a subclass of
@{class@arcanist:ArcanistUnitTestEngine}, which should be used to apply
unit test rules to this project. See
@{article:Arcanist User Guide: Customizing Lint, Unit Tests and Workflows}.
These options are supported, but their use is discouraged:
- **http.basicauth.user**: specify an HTTP basic auth username for use when
- connecting to Phabricator.
+ connecting to Phorge.
- **http.basicauth.pass**: specify an HTTP basic auth password for use when
- connecting to Phabricator.
+ connecting to Phorge.
- **https.blindly-trust-domains**: a list of domains to trust blindly over
HTTPS, even if their certificates are invalid. This is a brute force
solution to certificate validity problems, and is discouraged. Instead,
use valid certificates.
For a complete list of options, run `arc get-config`. Although all
options can be set in `.arcconfig`, some options (like `editor`) usually do not
make sense to set here because they're likely to vary from user to user.
= History Mutability =
Arcanist workflows run in two broad modes: either history is //mutable// or
//immutable//. Under a //mutable// history, `arc` commands may rewrite the
working copy history; under an //immutable// history, they may not.
You control history mutability by setting `history.immutable` to `true` or
`false` in your configuration. By default, it is `false` in Git (i.e.,
//mutable//) and `true` in Mercurial (i.e., //immutable//). The sections below
explain how these settings affect workflows.
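For example, adding this to `.arcconfig` forces `arc` to use immutable-history
workflows in Git:
  {
    "history.immutable" : true
  }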
== History Mutability: Git ==
In a workflow with //mutable// history, you rewrite local history. You develop
in feature branches, but squash or amend before pushing by using `git commit
--amend`, `git rebase -i`, or `git merge --squash`. Generally, one idea in
the remote is represented by one commit.
In a workflow with //immutable// history, you do not rewrite local history. You
develop in feature branches and push them without squashing commits. You do not
use `git commit --amend` or `git rebase -i`. Generally, one idea in the
remote is represented by many commits.
Practically, these are the differences you'll see based on your setting:
- **Mutable**
- `arc diff` will prompt you to amend lint changes into HEAD.
- `arc diff` will amend the commit message in HEAD after creating a
revision.
- `arc land` will default to the `--squash` strategy.
- `arc amend` will amend the commit message in HEAD with information from
the corresponding or specified Differential revision.
- **Immutable**
- `arc diff` will abort if it makes lint changes.
- `arc diff` will not amend the commit message in HEAD after creating a
revision.
- `arc land` will default to the `--merge` strategy.
- `arc amend` will exit with an error message.
== History Mutability: Mercurial ==
Before version 2.2, stock Mercurial has no history mutation commands, so
this setting has no effect. With Mercurial 2.2 or newer, making history
//mutable// means:
- **Mutable** (versions 2.2 and newer)
- `arc diff` will amend the commit message in `.` after creating a
revision.
- `arc amend` will amend the commit message in `.` with information from
the corresponding or specified Differential revision.
- **Immutable** (or versions prior to 2.2)
- `arc diff` will not amend the commit message in `.` after creating a
revision.
- `arc amend` will exit with an error message.
= How Libraries Are Located =
If you specify an external library to load, like 'examplelib', and use a
relative path like this:
{
...
"load": [
"examplelib/src"
],
...
}
...arc looks for it by trying these paths:
- `path/to/root/examplelib/src/` First, arc looks in the project's root
directory (where the `.arcconfig` lives) to see if the library is part of
the project. This makes it easy to just put project-specific code in a
project.
- `path/to/root/../examplelib/src/` Next, arc looks //next to// the project's
root directory to see if the library is in a sibling directory. If you
work with several repositories, this makes it easy to put all the `arc`
code in one repository and just check it out in the same directory as
everything else.
- `php/include/path/examplelib/src` Finally, arc falls back to PHP, which
will look in paths described in the `include_path` php.ini setting. This
allows you to install libraries in some global location if you prefer.
You can alternately supply an absolute path, like `/var/arc/examplelib/src`, but
then everyone will need to install the library at that exact location.
NOTE: Specify the path to the directory which includes
`__phutil_library_init__.php`. For example, if your init file is in
`examplelib/src/__phutil_library_init__.php`, specify `examplelib/src`,
not just `examplelib/`.
The general intent here is:
- Put project-specific code in some directory in the project, like
`support/arc/src/`.
- Put shared code (e.g., which enforces general coding standards or hooks
up to unit tests or whatever) in a separate repository and check it out
next to other repositories.
- Or put everything in some standard location and add it to `include_path`.
= Running Without .arcconfig =
You don't need to set up `.arcconfig`: you can run `arc` commands that require
a working copy in any Git, Subversion or Mercurial working copy without one.
However, some features won't work unless you set up an `.arcconfig` file.
Without `.arcconfig`:
- - You will need to set a default Phabricator URI with
+ - You will need to set a default Phorge URI with
`arc set-config default <uri>`, or specify an explicit URI
with `--conduit-uri` each time you run a command.
- You will not be able to run linters through arc unless you pass `--engine`
explicitly.
- You will not be able to customize certain linter parameters even with
`--engine`.
- You will not be able to run unit tests through arc unless you pass
`--engine` explicitly.
- You will not be able to trigger lint and unit integration through
`arc diff`.
- You will not be able to put Git working copies into immutable history mode
(see below).
- You will not be able to specify a repository encoding. UTF-8 will be assumed
if you do not pass `--encoding`.
- You will not be able to add plugins to arc to modify existing workflows or
add new ones.
- You will not be able to load additional libraries unless you specify them
explicitly with `--load-phutil-library`.
- Symbol index integration, which allows users to click function or class
names in Differential and jump to their definitions, will not work.
- `arc patch` will be unable to detect that you are applying changes to the
wrong project.
- In Subversion, `arc` will be unable to determine the canonical root
of a project, and will assume it is the working directory (in Subversion
prior to 1.7) or the root of the checkout (in Subversion 1.7 or newer). This
means the paths of files in diffs won't be anchored to the same place,
and will have different amounts of path context, which may be confusing for
reviewers and will sometimes prevent patches from applying properly if they
are applied against a different directory than they were generated from.
- In Subversion, `arc` will be unable to guess that you intend to update
an existing revision; you must use `--update` explicitly or `--preview`
and attach diffs via the web interface.
= Next Steps =
Continue by:
- returning to @{article:Arcanist User Guide}.
diff --git a/src/docs/user/userguide/arcanist_quick_start.diviner b/src/docs/user/userguide/arcanist_quick_start.diviner
index 25847ab8a6..2326add155 100644
--- a/src/docs/user/userguide/arcanist_quick_start.diviner
+++ b/src/docs/user/userguide/arcanist_quick_start.diviner
@@ -1,79 +1,79 @@
@title Arcanist Quick Start
@group userguide
Quick guide to getting Arcanist working for a new project.
This is a summary of steps to install Arcanist, configure a project for use with
it, and run `arc` to send changes for review. For detailed instructions on
installing Arcanist, see @{article:Arcanist User Guide}. OS specific guides
are also available.
- For Mac OS X, see @{article:Arcanist User Guide: Mac OS X}.
- For Windows, see @{article:Arcanist User Guide: Windows}.
= Installing Arcanist =
First, install dependencies:
- Install PHP.
- Install Git.
Then install Arcanist itself:
somewhere/ $ git clone https://github.com/phacility/arcanist.git
Add `arc` to your path:
$ export PATH="$PATH:/somewhere/arcanist/bin/"
This won't work on Windows; see @{article:Arcanist User Guide: Windows} for
instructions.
= Configure Your Project =
For detailed instructions on project configuration, see
@{article:Arcanist User Guide: Configuring a New Project}.
Create a `.arcconfig` file in your project's working copy:
$ cd yourproject/
yourproject/ $ $EDITOR .arcconfig
yourproject/ $ cat .arcconfig
{
- "phabricator.uri" : "https://phabricator.example.com/"
+ "phabricator.uri" : "https://phorge.example.com/"
}
-Set `phabricator.uri` to the URI for your Phabricator install (where `arc`
+Set `phabricator.uri` to the URI for your Phorge install (where `arc`
should send changes to).
NOTE: You should **commit this file** to the repository.
= Install Arcanist Credentials =
-Credentials allow you to authenticate. You must have an account on Phabricator
+Credentials allow you to authenticate. You must have an account on Phorge
before you can perform this step.
$ cd yourproject/
yourproject/ $ arc install-certificate
...
Follow the instructions. This will link your user account on your local machine
-to your Phabricator account.
+to your Phorge account.
= Send Changes For Review =
For detailed instructions on using `arc diff`, see
@{article:Arcanist User Guide: arc diff}.
$ $EDITOR file.c
$ arc diff
= Next Steps =
Continue by:
- learning more about project configuration with
@{article:Arcanist User Guide: Configuring a New Project}; or
- learning more about `arc diff` with
@{article:Arcanist User Guide: arc diff}; or
- returning to @{article:Arcanist User Guide}.
diff --git a/src/docs/user/userguide/audit.diviner b/src/docs/user/userguide/audit.diviner
index 5223f6c969..c179a9e61f 100644
--- a/src/docs/user/userguide/audit.diviner
+++ b/src/docs/user/userguide/audit.diviner
@@ -1,199 +1,199 @@
@title Audit User Guide
@group userguide
-Guide to using Phabricator to audit published commits.
+Guide to using Phorge to audit published commits.
Overview
========
-Phabricator supports two code review workflows, "review" (pre-publish) and
+Phorge supports two code review workflows, "review" (pre-publish) and
"audit" (post-publish). To understand the differences between the two, see
@{article:User Guide: Review vs Audit}.
How Audit Works
===============
The audit workflow occurs after changes have been published. It provides ways
to track, discuss, and resolve issues with commits that are discovered after
they go through whatever review process you have in place (if you have one).
Two examples of how you might use audit are:
**Fix Issues**: If a problem is discovered after a change has already been
published, users can find the commit which introduced the problem and raise a
concern on it. This notifies the author of the commit and prompts them to
remedy the issue.
**Watch Changes**: In some cases, you may want to passively look over changes
that satisfy some criteria as they are published. For example, you may want to
review all Javascript changes at the end of the week to keep an eye on things,
or make sure that code which impacts a subsystem is looked at by someone on
that team, eventually.
Developers may also want other developers to take a second look at things if
they realize they aren't sure about something after a change has been published,
or just want to provide a heads-up.
You can configure Herald rules and Owners packages to automatically trigger
audits of commits that satisfy particular criteria.
Audit States and Actions
========================
The audit workflow primarily keeps track of two things:
- **Commits** and their audit state (like "Not Audited", "Approved", or
"Concern Raised").
- **Audit Requests** which ask a user (or some other entity, like a project
or package) to audit a commit. These can be triggered in a number of ways
(see below).
Users interact with commits by leaving comments and applying actions, like
accepting the changes or raising a concern. These actions change the state of
their own audit and the overall audit state of the commit. Here's an example of
a typical audit workflow:
- Alice publishes a commit containing some Javascript.
- This triggers an audit request to Bailey, the Javascript technical
lead on the project (see below for a description of trigger mechanisms).
- - Later, Bailey logs into Phabricator and sees the audit request. She ignores
+ - Later, Bailey logs into Phorge and sees the audit request. She ignores
it for the moment, since it isn't blocking anything. At the end of the
week she looks through her open requests to see what the team has been
up to.
- Bailey notices a few minor problems with Alice's commit. She leaves
comments describing improvements and uses "Raise Concern" to send the
commit back into Alice's queue.
- - Later, Alice logs into Phabricator and sees that Bailey has raised a
+ - Later, Alice logs into Phorge and sees that Bailey has raised a
concern (usually, Alice will also get an email). She resolves the issue
somehow, maybe by making a followup commit with fixes.
- After the issues have been dealt with, she uses "Request Verification" to
return the change to Bailey so Bailey can verify that the concerns have
been addressed.
- Bailey uses "Accept Commit" to close the audit.
In {nav Diffusion > Browse Commits}, you can review commits and query for
commits with certain audit states. The default "Active Audits" view shows
all of the commits which are relevant to you given their audit state, divided
into buckets:
- **Needs Attention**: These are commits which you authored that another
user has raised a concern about: for example, maybe they believe they have
found a bug or some other problem. You should address the concerns.
- **Needs Verification**: These are commits which someone else authored
that you previously raised a concern about. The author has indicated that
they believe the concern has been addressed. You should verify that the
remedy is satisfactory and accept the change, or raise a further concern.
- **Ready to Audit**: These are commits which someone else authored that you
have been asked to audit, either by a user or by a system rule. You should
look over the changes and either accept them or raise concerns.
- **Waiting on Authors**: These are commits which someone else authored that
you previously raised a concern about. The author has not responded to the
concern yet. You may want to follow up.
- **Waiting on Auditors**: These are commits which you authored that someone
else needs to audit.
You can use the query constraints to filter this list or find commits that
match certain criteria.
Audit Triggers
==============
Audit requests can be triggered in a number of ways:
- You can add auditors explicitly from the web UI, using either "Edit Commit"
or the "Change Auditors" action. You might do this if you realize you are
not sure about something that you recently published and want a second
opinion.
- If you put `Auditors: username1, username2` in your commit message, it will
trigger an audit request to those users when you push it to a tracked
branch (see the example message after this list).
- You can create rules in Herald that trigger audits based on properties
of the commit -- like the files it touches, the text of the change, the
author, etc.
- You can create an Owners package and enable automatic auditing for the
package.
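If you use the `Auditors:` field, a commit message might look something like
this (the usernames are examples):
```
Fix a crash when importing empty calendar files
Auditors: alice, bailey
```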
Audits in Small Teams
=====================
If you have a small team and don't need complicated trigger rules, you can set
up a simple audit workflow like this:
- Create a new Project, "Code Audits".
- Create a new global Herald rule for Commits, which triggers an audit by
the "Code Audits" project for every commit where "Differential Revision"
"does not exist" (this will allow you to transition partly or fully to
review later if you want).
- Have every engineer join the "Code Audits" project.
This way, everyone will see an audit request for every commit, but it will be
dismissed if anyone approves it. Effectively, this enforces the rule "every
commit should have //someone// look at it".
Once your team gets bigger, you can refine this ruleset so that developers see
only changes that are relevant to them.
Audit Tips
==========
- When viewing a commit, audit requests you are responsible for are
highlighted. You are responsible for a request if it's a user request
and you're that user, or if it's a project request and you're a member
of the project, or if it's a package request and you're a package owner.
Any action you take will update the state of all the requests you're
responsible for.
- You can leave inline comments by clicking the line numbers in the diff.
- You can leave a comment across multiple lines by dragging across the line
numbers.
- Inline comments are initially saved as drafts. They are not submitted until
you submit a comment at the bottom of the page.
- Press "?" to view keyboard shortcuts.
Audit Maintenance
=================
The `bin/audit` command allows you to perform several maintenance operations.
Get more information about a command by running:
```
-phabricator/ $ ./bin/audit help <command>
+phorge/ $ ./bin/audit help <command>
```
Supported operations are:
**Delete Audits**: Delete audits that match certain parameters with
`bin/audit delete`.
You can use this command to forcibly delete requests which may have triggered
incorrectly (for example, because a package or Herald rule was configured in an
overbroad way).
**Synchronize Audit State**: Synchronize the audit state of commits to the
current open audit requests with `bin/audit synchronize`.
Normally, overall audit state is automatically kept up to date as changes are
made to an audit. However, if you manually update the database to make changes
to audit request state, the state of corresponding commits may no longer be
consistent.
This command will update commits so their overall audit state reflects the
cumulative state of their actual audit requests.
**Update Owners Package Membership**: Update which Owners packages commits
belong to with `bin/audit update-owners`.
Normally, commits are automatically associated with packages when they are
imported. You can use this command to manually rebuild this association if
you run into problems with it.
Next Steps
==========
- Learn more about Herald at @{article:Herald User Guide}.
diff --git a/src/docs/user/userguide/calendar.diviner b/src/docs/user/userguide/calendar.diviner
index 5c09a753b7..14fb825b59 100644
--- a/src/docs/user/userguide/calendar.diviner
+++ b/src/docs/user/userguide/calendar.diviner
@@ -1,110 +1,110 @@
@title Calendar User Guide
@group userguide
Guide to the Calendar application.
Overview
========
IMPORTANT: Calendar is a prototype application. See
@{article:User Guide: Prototype Applications}.
Calendar allows you to schedule parties and invite other users to party with
you. Everyone loves to party. Use Calendar primarily for partying.
Reminders
=========
Calendar sends reminder email before events occur. You will receive a reminder
if:
- you have marked yourself as **attending** the event;
- the event has not been cancelled; and
- the event was not imported from an external source.
Reminders are sent 15 minutes before events begin.
Availability
============
-Across all applications, Phabricator shows a red dot next to usernames if the
+Across all applications, Phorge shows a red dot next to usernames if the
user is currently attending an event. This provides a hint that they may be in
a meeting (or on vacation) and could take a while to get back to you about a
revision or task.
You can click through to a user's profile to see more details about their
availability.
Status Icons
============
On the month and day views, Calendar shows an icon next to each event to
indicate status. The icons are:
- {icon user-plus, color=green} **Invited, Individual**: You're personally
invited to the event.
- {icon users, color=green} **Invited, Group**: A project you are a member
of is invited to the event.
- {icon check-circle, color=green} **Attending**: You're attending the event.
- {icon times-circle, color=grey} **Declined**: You've declined the event.
- {icon times, color=red} **Cancelled**: The event has been cancelled.
If you don't have any special relationship to the event and the event does not
have any special status, an event-specific icon is shown instead.
Importing Events
================
You can import events from email and from other calendar applications
(like Google Calendar and Calendar.app) into Calendar. For a detailed
guide, see @{article:Calendar User Guide: Importing Events}.
Exporting Events
================
You can export events from Calendar to other applications by downloading
events as `.ics` files or configuring a calendar subscription.
Calendar also attaches `.ics` files containing event information when it sends
email. Most calendar applications can import these files.
For a detailed guide to exporting events, see
@{article:Calendar User Guide: Exporting Events}.
Recurring Events
================
To create a recurring event (like a weekly meeting), first create an event
normally, then select {nav Make Recurring} from the action menu and configure
how often the event should repeat.
**Monthly Events on the 29th, 30th or 31st**: If you configure an event to
repeat monthly and schedule the first instance on the 29th, 30th, or 31st of
the month, it can not occur on the same day every month because some months
do not have enough days.
Instead, these events are internally scheduled to occur relative to the end
of the month. For example, if you schedule a monthly event on the 30th of a
31 day month, it will occur on the second-to-last day of each following month.
**Complex RRULEs**: Calendar supports complex RRULEs internally (like events
that occur every-other Thursday in prime-numbered months) but does not
currently have a UI for scheduling events with complex rules.
Future versions of Calendar may add UI support for scheduling events with
complex rules. In some cases, a partial workaround is to schedule the event
in another
application (which has more complex scheduling controls available) and then
import it into Calendar.
Next Steps
==========
Continue by:
- importing events with @{article:Calendar User Guide: Importing Events}; or
- exporting events with @{article:Calendar User Guide: Exporting Events}.
diff --git a/src/docs/user/userguide/calendar_exports.diviner b/src/docs/user/userguide/calendar_exports.diviner
index 487de559a4..1d6d6bc1da 100644
--- a/src/docs/user/userguide/calendar_exports.diviner
+++ b/src/docs/user/userguide/calendar_exports.diviner
@@ -1,97 +1,97 @@
@title Calendar User Guide: Exporting Events
@group userguide
Exporting events to other calendars.
Overview
========
IMPORTANT: Calendar is a prototype application. See
@{article:User Guide: Prototype Applications}.
-You can export events from Phabricator to other calendar applications like
+You can export events from Phorge to other calendar applications like
**Google Calendar** or **Calendar.app**. This document will guide you through
-how to export event data from Phabricator.
+how to export event data from Phorge.
When you export events into another application, they generally will not be
editable from that application. Exporting events allows you to create one
calendar that shows all the events you care about in whatever application you
prefer (so you can keep track of everything you need to do), but does not let
-you edit Phabricator events from another application.
+you edit Phorge events from another application.
When exporting events, you can either export individual events one at a time
or export an entire group of events (for example, all events you are attending).
Exporting a Single Event
========================
To export a single event, visit the event detail page and click
{nav Export as .ics}. This will download an `.ics` file which you can import
into most other calendar applications.
Mail you receive about events also has a copy of this `.ics` file attached to
it. You can import this `.ics` file directly.
In **Google Calendar**, use {nav Other Calendars > Import Calendar} to import
the `.ics` file.
In **Calendar.app**, use {nav File > Import...} to import the `.ics` file, or
drag the `.ics` file onto your calendar.
When you export a recurring event, the `.ics` file will contain information
about the entire event series.
If you want to update event information later, you can just repeat this
process. Calendar applications will update the existing event if you've
previously imported an older version of it.
Exporting a Group of Events
===========================
You can export a group of events matching an arbitrary query (like all events
you are attending) to keep different calendars in sync.
To export a group of events:
- Run a query in Calendar which selects the events you want to export.
- Example: All events you are attending.
- Example: All events you are invited to.
- Example: All events tagged `#meetup`.
- Select the {nav Use Results... > Export Query as .ics} action to turn
the query into an export.
- Name the export with a descriptive name.
- Select a policy mode for the export (see below for discussion).
- Click {nav Create New Export} to finish the process.
The **policy modes** for exports are:
- **Public**: Only public information (visible to logged-out users) will
be exported. This mode is not available if your install does not have
public information (per `policy.allow-public` in Config).
- **Privileged**: All event information will be exported. This means that
anyone who knows the export URI can see ALL of the related event
information, as though they were logged in with your account.
WARNING: Anyone who learns the URI for an export can see the data you choose
-to export, even if they don't have a Phabricator account! Be careful about how
+to export, even if they don't have a Phorge account! Be careful about how
much data you export and treat the URI as a secret. If you accidentally share
a URI, you can disable the export.
After finishing the process, you'll see a screen with some details about the
export and an **ICS URI**. This URI allows you to import the events which match
the query into another calendar application.
In **Google Calendar**, use {nav Other Calendars > Add by URL} to import the
URI.
In **Calendar.app**, use {nav File > New Calendar Subscription...} to subscribe
to the URI.
Next Steps
==========
Continue by:
- returning to the @{article:Calendar User Guide}.
diff --git a/src/docs/user/userguide/calendar_imports.diviner b/src/docs/user/userguide/calendar_imports.diviner
index a8fbc6ff09..bebfb84ed2 100644
--- a/src/docs/user/userguide/calendar_imports.diviner
+++ b/src/docs/user/userguide/calendar_imports.diviner
@@ -1,132 +1,132 @@
@title Calendar User Guide: Importing Events
@group userguide
Importing events from other calendars.
Overview
========
IMPORTANT: Calendar is a prototype application. See
@{article:User Guide: Prototype Applications}.
-You can import events into Phabricator to other calendar applications or from
+You can import events into Phorge from other calendar applications or from
`.ics` files. This document will guide you through how to import event data
-into Phabricator.
+into Phorge.
When you import events from another application, they can not be edited in
-Phabricator. Importing events allows you to share events or keep track of
+Phorge. Importing events allows you to share events or keep track of
events from different sources, but does not let you edit events from other
-applications in Phabricator.
+applications in Phorge.
Import Policies
===============
When you import events, you select a visibility policy for the import. By
default, imported events are only visible to you (the user importing them).
To share imported events with other users, make the import **Visible To**
a wider set of users, like "All Users".
Importing `.ics` Files
======================
`.ics` files contain information about events, usually either about a single
event or an entire event calendar.
If you have an event or calendar in `.ics` format, you can import it into
-Phabricator in two ways:
+Phorge in two ways:
- Navigate to {nav Calendar > Imports > Import Events > Import .ics File}.
- Drag and drop the file onto a Calendar.
-This will create a copy of the event in Phabricator.
+This will create a copy of the event in Phorge.
If you want to update an imported event later, just repeat this process. The
event will be updated with the latest information.
Many applications send `.ics` files as email attachments. You can import these
-into Phabricator.
+into Phorge.
.ics Files: Google Calendar
===========================
In **Google Calendar**, you can generate a `.ics` file for a calendar by
clicking the dropdown menu next to the calendar and selecting
{nav Calendar Settings > Export Calendar > Export this calendar}.
.ics Files: Calendar.app
========================
In **Calendar.app**, you can generate an `.ics` file for a calendar by
selecting the calendar, then selecting {nav File > Export > Export...} and
saving the calendar as a `.ics` file.
You can also convert an individual event into an `.ics` file by dragging it
from the calendar to your desktop (or any other folder).
-When you import an event using an `.ics` file, Phabricator can not
+When you import an event using an `.ics` file, Phorge can not
automatically keep the event up to date. You'll need to repeat the process if
-there are changes to the event or calendar later, so Phabricator can learn
+there are changes to the event or calendar later, so Phorge can learn
about the updates.
Importing .ics URIs
=====================
If you have a calendar in another application that supports publishing a
-`.ics` URI, you can subscribe to it in Phabricator. This will import the entire
+`.ics` URI, you can subscribe to it in Phorge. This will import the entire
calendar, and can be configured to automatically keep it up to date and in sync
with the external calendar.
First, find the subscription URI for the calendar you want to import (see
below for some guidance on popular calendar applications). Then, browse to
{nav Calendar > Imports > Import Events > Import .ics URI}.
When you import a URI, you can choose to enable automatic updates. If you do,
-Phabricator will periodically update the events it imports from this source.
+Phorge will periodically update the events it imports from this source.
You can stop this later by turning off the automatic updates or disabling
the import.
{icon lock} **Privacy Note**: When you import via URI, the URI often contains
sensitive information (like a username, password, or secret key) which allows
anyone who knows it to access private details about events. Anyone who can edit
the import will also be able to view and edit the URI, so make sure you don't
grant edit access to users who should not have access to the event details.
.ics URIs: Google Calendar
==========================
In **Google Calendar**, you can get the subscription URI for a calendar
by selecting {nav Calendar Settings} from the dropdown next to the calendar,
then copying the URL from the {nav ICAL} link under **Private Address**. This
URI provides access to all event details, including private information.
You may need to adjust the sharing and visibility settings for the calendar
before this option is available.
Alternatively, you can use the URI from the {nav ICAL} link under
**Calendar Address** to access a more limited set of event details. You can
configure which details are available by configuring how the calendar is
shared.
.ics URIs: Calendar.app
=======================
**Calendar.app** does not support subscriptions via `.ics` URIs.
You can export a calendar as an `.ics` file by following the steps above, but
-Phabricator can not automatically keep events imported in this way up to date.
+Phorge can not automatically keep events imported in this way up to date.
Next Steps
==========
Continue by:
- returning to the @{article:Calendar User Guide}.
diff --git a/src/docs/user/userguide/conduit.diviner b/src/docs/user/userguide/conduit.diviner
index 35daee505f..dd74919a72 100644
--- a/src/docs/user/userguide/conduit.diviner
+++ b/src/docs/user/userguide/conduit.diviner
@@ -1,67 +1,67 @@
@title Conduit API Overview
@group conduit
Overview of the Conduit API.
Overview
========
-Conduit is the HTTP API for Phabricator. It is roughly JSON-RPC: you usually
+Conduit is the HTTP API for Phorge. It is roughly JSON-RPC: you usually
pass a JSON blob, and usually get a JSON blob back, although both call and
result formats are flexible in some cases.
API Clients
===========
The primary ways to make Conduit calls are:
**Web Console**: The {nav Conduit} application provides a web UI for exploring
the API and making calls. This is the best starting point for learning about
the API. See the next section for details.
`ConduitClient`: This is the official client available in `arcanist`.
`arc call-conduit`: You can use this `arc` command to execute low-level
Conduit calls by piping JSON into stdin. This can provide a simple way
to explore the API, or a quick way to get API access from a script written
in another language without needing a real client.
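For example, you can check connectivity by piping an empty parameter object to
a simple method like `conduit.ping` (assuming `arc` is already configured to
talk to your install):
```
$ echo '{}' | arc call-conduit conduit.ping
```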
`curl`: You can format a call with basic HTTP parameters and cURL. The console
includes examples which show how to format calls.
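As a rough sketch, a cURL call usually looks something like this (the install
URI and API token below are placeholders; copy the exact format from the
console):
```
$ curl https://phorge.example.com/api/conduit.ping \
    -d api.token=api-xxxxxxxxxxxxxxxxxxxx
```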
**Other Clients**: There are also clients available in other languages. You
-can check the [[ https://secure.phabricator.com/w/community_resources/ |
+can check the [[ https://we.phorge.it/w/community_resources/ |
Community Resources ]] page for links.
API Console
===========
The easiest way to begin exploring Conduit is by visiting {nav Conduit} in the
web UI. The application provides an API console which you can use to explore
available methods, make calls, read documentation, and see examples.
The API console has details about how to construct calls and generate API
tokens for authentication.
Querying and Reading Objects
============================
For information on searching for objects and reading their properties and
information, see @{article:Conduit API: Using Search Endpoints}.
Creating and Editing Objects
============================
For information on creating, editing and updating objects, see
@{article:Conduit API: Using Edit Endpoints}.
Next Steps
==========
Continue by:
- reading recommendations on responding to API changes in
@{article:Managing Conduit Changes}.
diff --git a/src/docs/user/userguide/differential.diviner b/src/docs/user/userguide/differential.diviner
index 496fba6388..d48ff4ceff 100644
--- a/src/docs/user/userguide/differential.diviner
+++ b/src/docs/user/userguide/differential.diviner
@@ -1,71 +1,71 @@
@title Differential User Guide
@group userguide
Guide to the Differential (pre-push code review) tool and workflow.
= Overview =
-Phabricator supports two code review workflows, "review" (pre-push) and
+Phorge supports two code review workflows, "review" (pre-push) and
"audit" (post-push). To understand the differences between the two, see
@{article:User Guide: Review vs Audit}.
This document summarizes the pre-push "review" workflow implemented by the tool
//Differential//.
= How Review Works =
-Code review in Phabricator is a lightweight, asynchronous web-based process. If
+Code review in Phorge is a lightweight, asynchronous web-based process. If
you are familiar with GitHub, it is similar to how pull requests work:
- An author prepares a change to a codebase, then sends it for review. They
specify who they want to review it (additional users may be notified as
well, see below). The change itself is called a "Differential Revision".
- The reviewers receive an email asking them to review the change.
- The reviewers inspect the change and either discuss it, approve it, or
request changes (e.g., if they identify problems or bugs).
- In response to feedback, the author may update the change (e.g., fixing
the bugs or addressing the problems).
- Once the feedback has been addressed, a reviewer accepts the change and the
author pushes it to the upstream.
The Differential home screen shows two sets of revisions:
- **Action Required** is revisions you are the author of or a reviewer for,
which you need to review, revise, or push.
- **Waiting on Others** is revisions you are the author of or a reviewer for,
which someone else needs to review, revise, or push.
= Creating Revisions =
The preferred way to create revisions in Differential is with `arc`
(see @{article:Arcanist User Guide}). You can also create revisions from the
web interface, by navigating to Differential, pressing the "Create Revision"
button, and pasting a diff in.
= Herald Rules =
If you're interested in keeping track of changes to certain parts of a codebase
(e.g., maybe changes to a feature or changes in a certain language, or there's
just some intern who you don't trust) you can write a Herald rule to
automatically CC you on any revisions which match rules (like content, author,
files affected, etc.)
Inline Comments
===============
You can leave inline comments by clicking the line number next to a line. For
an in-depth look at inline comments, see
@{article:Differential User Guide: Inline Comments}.
Next Steps
==========
Continue by:
- diving into the details of inline comments in
@{article:Differential User Guide: Inline Comments}; or
- reading the FAQ at @{article:Differential User Guide: FAQ}; or
- learning about test plans in
@{article:Differential User Guide: Test Plans}; or
- learning more about Herald in @{article:Herald User Guide}.
diff --git a/src/docs/user/userguide/differential_land.diviner b/src/docs/user/userguide/differential_land.diviner
index 8d5f30d784..540e4b61f0 100644
--- a/src/docs/user/userguide/differential_land.diviner
+++ b/src/docs/user/userguide/differential_land.diviner
@@ -1,53 +1,53 @@
@title Differential User Guide: Automated Landing
@group userguide
-Configuring Phabricator so you can "Land Revision" from the web UI.
+Configuring Phorge so you can "Land Revision" from the web UI.
Overview
========
IMPORTANT: This feature is a prototype and has substantial limitations.
-Phabricator can be configured so that approved revisions may be published
+Phorge can be configured so that approved revisions may be published
directly from the web interface. This can make publishing changes more
convenient, particularly for open source projects where authors may not have
commit access to the repository. This document explains the workflow and how to
configure it.
When properly configured, a {nav Land Revision} action will appear in
Differential. This action works like `arc land` on the command line, and
merges and publishes the revision.
This feature has significant limitations:
- This feature is a prototype.
- This feature is only supported in Git.
- This feature always lands changes onto `master`.
- This feature does not currently provide chain of custody, and what lands
may be arbitrarily different than what is shown in Differential.
To be landable, a revision must satisfy these requirements:
- It must belong to a repository which is tracked in Diffusion
(both hosted and imported repositories will work).
- The repository must have a **Staging Area** configured.
- The repository must have **Repository Automation** configured. For
details, see @{article:Drydock User Guide: Repository Automation}.
- The revision must have been created with `arc diff` and pushed to the
configured staging area at creation time.
- The user clicking the "Land Revision" button must have permission to push
to the repository.
If these requirements are met, the {nav Land Revision} action should be
available in the UI.
Next Steps
==========
Continue by:
- configuring repository automation with
@{article:Drydock User Guide: Repository Automation}; or
- returning to the @{article:Differential User Guide}.
diff --git a/src/docs/user/userguide/diffusion.diviner b/src/docs/user/userguide/diffusion.diviner
index 8ee66d3f8f..60c28006a2 100644
--- a/src/docs/user/userguide/diffusion.diviner
+++ b/src/docs/user/userguide/diffusion.diviner
@@ -1,94 +1,94 @@
@title Diffusion User Guide
@group userguide
-Guide to Diffusion, the Phabricator application for hosting and browsing
+Guide to Diffusion, the Phorge application for hosting and browsing
repositories.
Overview
========
Diffusion allows you to create repositories so that you can browse them from
the web and interact with them from other applications.
Diffusion can host repositories locally, or observe existing remote
repositories which are hosted elsewhere (for example, on GitHub, Bitbucket, or
other existing hosting). Both types of repositories can be browsed and
interacted with, but hosted repositories support some additional triggers
and access controls which are not available for observed repositories.
-Diffusion is integrated with the other tools in the Phabricator suite. For
+Diffusion is integrated with the other tools in the Phorge suite. For
instance:
- when you commit Differential revisions to a tracked repository, they are
automatically updated and linked to the corresponding commits;
- you can add Herald rules to notify you about commits that match certain
rules;
- for hosted repositories, Herald can enforce granular access control rules;
- in all the tools, commit names are automatically linked.
The remainder of this document walks through creating, configuring, and
managing repositories.
Adding Repositories
===================
Repository administration is accomplished through Diffusion. You can use the
web interface in Diffusion to observe an external repository or create a new
hosted repository.
- For hosted repositories, make sure you go through the setup instructions
in @{article:Diffusion User Guide: Repository Hosting} first.
- For all repositories, you'll need to be running the daemons. If you have
not set them up yet, see @{article:Managing Daemons with phd}.
By default, you must be an administrator to create a new repository. You can
change this in the application settings.
Managing Repositories
=====================
Diffusion repositories have an array of configurable options and behaviors. For
details on the available options and guidance on managing and administrating
repositories, see @{article:Diffusion User Guide: Managing Repositories}.
Repositories can also be managed via the API. For an overview on using the
API to create and edit repositories, see
@{article:Diffusion User Guide: Repositories API}.
Repository Clustering
=====================
-Phabricator repository hosts can be set up in a cluster configuration so you
+Phorge repository hosts can be set up in a cluster configuration so you
can lose hosts with minimal downtime and data loss. This is an advanced feature
which most installs do not need to pursue.
To get started with clustering, see @{article:Clustering Introduction}. For
details on repository clustering, see @{article:Cluster: Repositories}.
Next Steps
==========
Continue by:
- learning how to create a symbol index at
@{article:Diffusion User Guide: Symbol Indexes}; or
- setting up repository hosting with
@{article:Diffusion User Guide: Repository Hosting}; or
- managing repository hooks with
@{article:Diffusion User Guide: Commit Hooks}; or
- understanding daemons in more detail with
@{article:Managing Daemons with phd}.
If you're having trouble getting things working, these topic guides may be
helpful:
- get details about automatically taking actions in response to commits in
@{article:Diffusion User Guide: Permanent Refs}; or
- - understand how Phabricator updates repositories with
+ - understand how Phorge updates repositories with
@{article:Diffusion User Guide: Repository Updates}; or
- fix issues with repository imports with
@{article:Troubleshooting Repository Imports}.
diff --git a/src/docs/user/userguide/diffusion_api.diviner b/src/docs/user/userguide/diffusion_api.diviner
index c2508dda72..3eaafa1bb8 100644
--- a/src/docs/user/userguide/diffusion_api.diviner
+++ b/src/docs/user/userguide/diffusion_api.diviner
@@ -1,182 +1,182 @@
@title Diffusion User Guide: Repositories API
@group userguide
Managing repositories with the API.
Overview
========
You can create and update Diffusion repositories using the Conduit API. This
may be useful if you have a large number of existing repositories you want
to import or apply bulk actions to.
For an introduction to Conduit, see @{article:Conduit API Overview}.
In general, you'll use these API methods:
- `diffusion.repository.edit`: Create and edit repositories.
- `diffusion.uri.edit`: Create and edit repository URIs to configure
observation, mirroring, and cloning.
To create a repository, you'll generally do this:
- Call `diffusion.repository.edit` to create a new object and configure
basic information.
- Optionally, call `diffusion.uri.edit` to add URIs to observe or mirror.
- Call `diffusion.repository.edit` to activate the repository.
This workflow mirrors the workflow from the web UI. The remainder of this
document walks through this workflow in greater detail.
Create a Repository
===================
To create a repository, call `diffusion.repository.edit`, providing any
properties you want to set. For simplicity these examples will use the
builtin `arc call-conduit` client, but you can use whatever Conduit client
you prefer.
When creating a repository, you must provide a `vcs` transaction to choose
a repository type, one of: `git`, `hg` or `svn`.
You must also provide a `name`.
Other properties are optional. Review the Conduit method documentation from the
web UI for an exhaustive list.
```
$ echo '{
"transactions": [
{
"type": "vcs",
"value": "git"
},
{
"type": "name",
"value": "Poetry"
}
]
}' | arc call-conduit diffusion.repository.edit
```
If things work, you should get a result that looks something like this:
```lang=json
{
...
"response": {
"object": {
"id": 1,
"phid": "PHID-REPO-7vm42oayez2rxcmpwhuv"
},
...
}
...
}
```
If so, your new repository has been created. It hasn't been activated yet so
it will not show up in the default repository list, but you can find it in the
web UI by browsing to {nav Diffusion > All Repositories}.
Continue to the next step to configure URIs.
Configure URIs
==============
Now that the repository exists, you can add URIs to it. This is optional,
and if you're creating a //hosted// repository you may be able to skip this
step.
-However, if you want Phabricator to observe an existing remote, you'll
+However, if you want Phorge to observe an existing remote, you'll
configure it here by adding a URI in "Observe" mode. Use the PHID from the
previous step to identify the repository you want to add a URI to, and call
`diffusion.uri.edit` to create a new URI in Observe mode for the repository.
You need to provide a `repository` to add the URI to, and the `uri` itself.
To add the URI in Observe mode, provide an `io` transaction selecting
`observe` mode.
You may also want to provide a `credential`.
```
$ echo '{
"transactions": [
{
"type": "repository",
"value": "PHID-REPO-7vm42oayez2rxcmpwhuv"
},
{
"type": "uri",
"value": "https://github.com/epriestley/poems.git"
},
{
"type": "io",
"value": "observe"
}
]
}' | arc call-conduit diffusion.uri.edit
```
You should get a response that looks something like this:
```lang=json
{
...
"response": {
"object": {
"id": 1,
"phid": "PHID-RURI-zwtho5o7h3m6rjzgsgrh"
},
...
}
...
}
```
If so, your URI has been created. You can review it in the web UI, under
{nav Manage Repository > URIs}.
When satisfied, continue to the next step to activate the repository.
Activate the Repository
=======================
Now that any URIs have been configured, activate the repository with another
call to `diffusion.repository.edit`. This time, modify the existing repository
instead of creating a new one:
```
$ echo '{
"objectIdentifier": "PHID-REPO-7vm42oayez2rxcmpwhuv",
"transactions": [
{
"type": "status",
"value": "active"
}
]
}' | arc call-conduit diffusion.repository.edit
```
If that goes through cleanly, you should be all set. You can review the
repository from the web UI.
Editing Repositories
====================
To edit an existing repository, apply changes normally with
`diffusion.repository.edit`. For more details on using edit endpoints, see
@{article:Conduit API: Using Edit Endpoints}.
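For example, to rename the repository created earlier, you might apply a
`name` transaction to the existing object, following the same pattern as the
calls above:
```
$ echo '{
  "objectIdentifier": "PHID-REPO-7vm42oayez2rxcmpwhuv",
  "transactions": [
    {
      "type": "name",
      "value": "Poetry (Archived)"
    }
  ]
}' | arc call-conduit diffusion.repository.edit
```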
Next Steps
==========
Continue by:
- returning to the @{article:Diffusion User Guide}.
diff --git a/src/docs/user/userguide/diffusion_existing.diviner b/src/docs/user/userguide/diffusion_existing.diviner
index 341146f0fd..f0c9bf0a33 100644
--- a/src/docs/user/userguide/diffusion_existing.diviner
+++ b/src/docs/user/userguide/diffusion_existing.diviner
@@ -1,68 +1,68 @@
@title Diffusion User Guide: Existing Repositories
@group userguide
Quick guide for importing or observing existing repositories.
Overview
========
If you have an existing repository, you can observe or import it into
Diffusion.
-Observing a repository creates a read-only copy in Phabricator that is kept
+Observing a repository creates a read-only copy in Phorge that is kept
up to date by continuously importing new changes.
Importing a repository creates a read-write copy.
This document is a quick guide to getting started. For an overview of
Diffusion, see @{article:Diffusion User Guide}. For a more detailed guide
about managing repositories and URIs in Diffusion, see
@{article:Diffusion User Guide: URIs}.
Observing Repositories
======================
To observe an existing repository:
- Create a repository in Diffusion, but do not activate it yet.
- Add the URI for the existing repository you wish to observe in the
**URIs** section, in **Observe** mode.
- Activate the repository in Diffusion.
-This creates a read-only copy of the repository in Phabricator. Phabricator
+This creates a read-only copy of the repository in Phorge. Phorge
will keep its copy in sync with the remote by periodically polling the remote
for changes.
For more details, see @{article:Diffusion User Guide: URIs}.
Importing Repositories
======================
There are two primary ways to import an existing repository:
**Observe First**: In Git or Mercurial, you can observe the repository first.
Once the import completes, change the "I/O Type" on the **Observe** URI to
"No I/O" mode to automatically convert it into a hosted repository.
**Push to Empty Repository**: Create and activate an empty repository, then push
all of your changes to the empty repository.
In Git and Mercurial, you can do this with `git push` or `hg push`.
In Subversion, you can do this with `svnsync`.
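For Git, a minimal sketch of the push might look like this (the remote URI is
a placeholder; use the clone URI Diffusion shows for your new repository):
```
$ cd existing-project/
existing-project/ $ git remote add phorge ssh://git@phorge.example.com/diffusion/POEMS/poems.git
existing-project/ $ git push phorge --all
existing-project/ $ git push phorge --tags
```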
For more details, see @{article:Diffusion User Guide: URIs}.
Next Steps
==========
Continue by:
- reading an overview of Diffusion in
@{article:Diffusion User Guide}; or
- learning more about managing remote repository URIs in
@{article:Diffusion User Guide: URIs}.
diff --git a/src/docs/user/userguide/diffusion_hooks.diviner b/src/docs/user/userguide/diffusion_hooks.diviner
index 54c93a933b..0f74a53f58 100644
--- a/src/docs/user/userguide/diffusion_hooks.diviner
+++ b/src/docs/user/userguide/diffusion_hooks.diviner
@@ -1,52 +1,52 @@
@title Diffusion User Guide: Commit Hooks
@group userguide
Guide to commit hooks in hosted repositories.
= Overview =
-Phabricator installs pre-receive/pre-commit hooks in hosted repositories
+Phorge installs pre-receive/pre-commit hooks in hosted repositories
automatically. They enforce a few rules automatically (like preventing
dangerous changes unless a repository is configured to allow them). They can
also enforce more complex rules via Herald, using the "Commit Hook:
Branches/Tags/Bookmarks" and "Commit Hook: Commit Content" rule types.
Herald rules are flexible, and can express many of the most common hooks that
are often installed on repositories (like protecting branches, restricting
access to repositories, and requiring review).
However, if Herald isn't powerful enough to enforce everything you want to
check, you can install additional custom hooks. These work mostly like normal
hooks, but with a few differences.
= Installing Custom Hooks =
With hosted repositories, you can install hooks by dropping them into the
relevant directory of the repository on disk:
- **SVN** Put hooks in `hooks/pre-commit-phabricator.d/`.
- **Git** Put hooks in `hooks/pre-receive-phabricator.d/`.
- - **Mercurial** Phabricator does not currently support custom hooks in
+ - **Mercurial** Phorge does not currently support custom hooks in
Mercurial.
These hooks act like normal `pre-commit` or `pre-receive` hooks:
- Executables in these directories will be run one at a time, in alphabetical
order.
- They'll be passed the arguments and environment that normal hooks are
passed.
- They should emit output and return codes like normal hooks do.
- These hooks will run only after all the Herald rules have passed and
- Phabricator is otherwise ready to accept the commit or push.
+ Phorge is otherwise ready to accept the commit or push.
These additional variables will be available in the environment, in addition
to the variables the VCS normally provides:
- `PHABRICATOR_REPOSITORY` The PHID of the repository the hook is
executing for.
- - `PHABRICATOR_USER` The Phabricator username that the session is
+ - `PHABRICATOR_USER` The Phorge username that the session is
authenticated under.
- `PHABRICATOR_REMOTE_ADDRESS` The connection's remote address (that is,
the IP address of whoever is pushing or committing).
- `PHABRICATOR_REMOTE_PROTOCOL` The protocol the connection is using (for
example, "ssh" or "http").
diff --git a/src/docs/user/userguide/diffusion_hosting.diviner b/src/docs/user/userguide/diffusion_hosting.diviner
index 47197f52fd..c8a1c26eed 100644
--- a/src/docs/user/userguide/diffusion_hosting.diviner
+++ b/src/docs/user/userguide/diffusion_hosting.diviner
@@ -1,629 +1,629 @@
@title Diffusion User Guide: Repository Hosting
@group userguide
-Guide to configuring Phabricator repository hosting.
+Guide to configuring Phorge repository hosting.
Overview
========
-Phabricator can host repositories and provide authenticated read and write
+Phorge can host repositories and provide authenticated read and write
access to them over HTTP and SSH. This document describes how to configure
repository hosting.
Understanding Supported Protocols
=================================
-Phabricator supports hosting over these protocols:
+Phorge supports hosting over these protocols:
| VCS | SSH | HTTP |
|-----|-----|------|
| Git | Supported | Supported |
| Mercurial | Supported | Supported |
| Subversion | Supported | Not Supported |
All supported protocols handle reads (pull/checkout/clone) and writes
(push/commit). Of the two protocols, SSH is generally more robust, secure and
performant, but HTTP is easier to set up and supports anonymous access.
| | SSH | HTTP |
| |-----|------|
| Reads | Yes | Yes |
| Writes | Yes | Yes |
| Authenticated Access | Yes | Yes |
| Push Logs | Yes | Yes |
| Commit Hooks | Yes | Yes |
| Anonymous Access | No | Yes |
| Security | Better (Asymmetric Key) | Okay (Password) |
| Performance | Better | Okay |
| Setup | Hard | Easy |
Each repository can be configured individually, and you can use either
protocol, or both, or a mixture across different repositories.
SSH is recommended unless you need anonymous access, or are not able to
configure it for technical reasons.
Creating System User Accounts
=============================
-Phabricator uses two system user accounts, plus a third account if you
+Phorge uses two system user accounts, plus a third account if you
configure SSH access. This section will guide you through creating and
-configuring them. These are system user accounts on the machine Phabricator
-runs on, not Phabricator user accounts.
+configuring them. These are system user accounts on the machine Phorge
+runs on, not Phorge user accounts.
-The system accounts Phabricator uses are:
+The system accounts Phorge uses are:
- The user the webserver runs as. We'll call this `www-user`.
- The user the daemons run as. We'll call this `daemon-user`. This
user is the only user which will interact with the repositories directly.
Other accounts will `sudo` to this account in order to perform repository
operations.
- The user that humans will connect over SSH as. We'll call this `vcs-user`.
If you do not plan to make repositories available over SSH, you do not need
to create or configure this user.
To create these users:
- Create a `www-user` if one does not already exist. In most cases, this
user will already exist and you just need to identify which user it is. Run
your webserver as this user.
- Create a `daemon-user` if one does not already exist (you can call this user
whatever you want, or use an existing account). Below, you'll configure
the daemons to start as this user.
- Create a `vcs-user` if one does not already exist and you plan to set up
SSH. When users clone repositories, they will use a URI like
- `vcs-user@phabricator.yourcompany.com`, so common names for this user are
+ `vcs-user@phorge.yourcompany.com`, so common names for this user are
`git` or `hg`.
Continue below to configure these accounts.
-Configuring Phabricator
+Configuring Phorge
=======================
-Now that you have created or identified these accounts, update the Phabricator
+Now that you have created or identified these accounts, update the Phorge
configuration to specify them.
First, set `phd.user` to the `daemon-user`:
```
-phabricator/ $ ./bin/config set phd.user daemon-user
+phorge/ $ ./bin/config set phd.user daemon-user
```
Restart the daemons to make sure this configuration works properly. They should
start as the correct user automatically.
If you're using a `vcs-user` for SSH, you should also configure that:
```
-phabricator/ $ ./bin/config set diffusion.ssh-user vcs-user
+phorge/ $ ./bin/config set diffusion.ssh-user vcs-user
```
Next, you'll set up `sudo` permissions so these users can interact with one
another.
Configuring Sudo
================
The `www-user` and `vcs-user` need to be able to `sudo` as the `daemon-user`
so they can interact with repositories.
To grant them access, edit the `sudo` system configuration. On many systems,
you will do this by modifying the `/etc/sudoers` file using `visudo` or
`sudoedit`. In some cases, you may add a new file to `/etc/sudoers.d` instead.
To give a user account `sudo` access to run a list of binaries, add a line like
this to the configuration file (this example would grant `vcs-user` permission
to run `ls` as `daemon-user`):
```
vcs-user ALL=(daemon-user) SETENV: NOPASSWD: /path/to/bin/ls
```
The `www-user` needs to be able to run these binaries as the `daemon-user`:
- `git` (if using Git)
- `git-http-backend` (if using Git)
- `hg` (if using Mercurial)
- `ssh` (if configuring clusters)
If you plan to use SSH, the `vcs-user` needs to be able to run these binaries
as the `daemon-user`:
- `git` (if using Git)
- `git-upload-pack` (if using Git)
- `git-receive-pack` (if using Git)
- `hg` (if using Mercurial)
- `svnserve` (if using Subversion)
- `ssh` (if configuring clusters)
Identify the full paths to all of these binaries on your system and add the
appropriate permissions to the `sudo` configuration.
Normally, you'll add two lines that look something like this:
```
www-user ALL=(daemon-user) SETENV: NOPASSWD: /path/to/x, /path/to/y, ...
vcs-user ALL=(daemon-user) SETENV: NOPASSWD: /path/to/x, /path/to/y, ...
```
This is just a template. In the real configuration file, you need to:
- Replace `www-user`, `daemon-user` and `vcs-user` with the correct
usernames for your system.
- List every binary that these users need access to, as described above.
- Make sure each binary path is the full path to the correct binary location
on your system.
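For example, on a Debian-like system where the webserver runs as `www-data`,
the daemons run as `phd`, and SSH users connect as `git`, the finished Git-only
lines might look something like this. These usernames and paths are
placeholders; confirm the real binary locations with `which` before copying
anything.
```
www-data ALL=(phd) SETENV: NOPASSWD: /usr/bin/git, /usr/lib/git-core/git-http-backend
git ALL=(phd) SETENV: NOPASSWD: /usr/bin/git, /usr/bin/git-upload-pack, /usr/bin/git-receive-pack
```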
Before continuing, look for this line in your `sudo` configuration:
Defaults requiretty
If it's present, comment it out by putting a `#` at the beginning of the line.
With this option enabled, VCS SSH sessions won't be able to use `sudo`.
Additional SSH User Configuration
=================================
If you're planning to use SSH, you should also edit `/etc/passwd` and
`/etc/shadow` to make sure the `vcs-user` account is set up correctly.
**`/etc/shadow`**: Open `/etc/shadow` and find the line for the `vcs-user`
account.
The second field (which is the password field) must not be set to `!!`. This
value will prevent login.
If you have `usermod` on your system, you can adjust this value with:
```
$ sudo usermod -p NP vcs-user
```
If you do not have `usermod`, carefully edit the file and set the field value
to `NP` ("no password") instead of `!!`.
**`/etc/passwd`**: Open `/etc/passwd` and find the line for the `vcs-user`
account.
The last field (which is the login shell) must be set to a real shell. If it is
set to something like `/bin/false`, then `sshd` will not be able to execute
commands.
If you have `usermod` on your system, you can adjust this value with:
```
$ sudo usermod -s /bin/sh vcs-user
```
If you do not have `usermod`, carefully edit the file and change the field
to point at a real shell, usually `/bin/sh`.
Configuring HTTP
================
If you plan to serve repositories over authenticated HTTP, you need to set
`diffusion.allow-http-auth` in Config. If you don't plan to serve repositories
over HTTP (or plan to use only anonymous HTTP) you can leave this setting
disabled.
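Following the `bin/config` pattern used earlier in this document, enabling the
setting looks something like this (run from your install directory):
```
phorge/ $ ./bin/config set diffusion.allow-http-auth true
```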
If you plan to use authenticated HTTP, you (and all other users) also need to
configure a VCS password for your account in {nav Settings > VCS Password}.
-Your VCS password must be a different password than your main Phabricator
+Your VCS password must be a different password than your main Phorge
password because VCS passwords are very easy to accidentally disclose. They are
often stored in plaintext in world-readable files, observable in `ps` output,
and present in command output and logs. We strongly encourage you to use SSH
instead of HTTP to authenticate access to repositories.
Otherwise, if you've configured system accounts above, you're all set. No
additional server configuration is required to make HTTP work. You should now
be able to fetch and push repositories over HTTP. See "Cloning a Repository"
below for more details.
If you're having trouble, see "Troubleshooting HTTP" below.
Configuring SSH
===============
SSH access requires some additional setup. You will configure and run a second,
restricted copy of `sshd` on the machine, on a different port from the standard
`sshd`. This special copy of `sshd` will serve repository requests and provide
-other Phabricator SSH services.
+other Phorge SSH services.
-NOTE: The Phabricator `sshd` service **MUST** be 6.2 or newer, because
-Phabricator relies on the `AuthorizedKeysCommand` option.
+NOTE: The Phorge `sshd` service **MUST** be 6.2 or newer, because
+Phorge relies on the `AuthorizedKeysCommand` option.
Before continuing, you must choose a strategy for which port each copy of
`sshd` will run on. The next section lays out various approaches.
SSHD Port Assignment
====================
The normal `sshd` that lets you administrate the host and the special `sshd`
which serves repositories can't run on the same port. In particular, only one
of them can run on port `22`, which will make it a bit inconvenient to access
the other one.
These instructions will walk you through configuring the alternate `sshd` on
port `2222`. This is easy to configure, but if you run the service on this port
users will clone and push to URIs like `ssh://git@host.com:2222/`, which is a
little ugly.
There are several different approaches you can use to mitigate or eliminate
this problem.
**Run on Port 2222**: You can do nothing, and just run the repository `sshd` on
port `2222` and accept the explicit port in the URIs. This is the simplest
approach, and you can always start here and clean things up later if you grow
tired of dealing with the port number.
**Use a Load Balancer**: You can configure a load balancer in front of the host
and have it forward TCP traffic on port `22` to port `2222`. Then users can
clone from `ssh://git@host.com/` without an explicit port number and you don't
need to do anything else.
This may be very easy to set up, particularly if you are hosted in AWS, and
is often the simplest and cleanest approach.
**Swap Ports**: You can move the administrative `sshd` to a new port, then run
-Phabricator `sshd` on port 22. This is somewhat complicated and can be a bit
+Phorge `sshd` on port 22. This is somewhat complicated and can be a bit
risky if you make a mistake. See "Moving the sshd Port" below for help.
**Change Client Config**: You can run on a nonstandard port, but configure SSH
on the client side so that `ssh` automatically defaults to the correct port
when connecting to the host. To do this, add a section like this to your
`~/.ssh/config`:
```
-Host phabricator.corporation.com
+Host phorge.corporation.com
Port 2222
```
(If you want, you can also add a default `User`.)
Command line tools like `ssh`, `git` and `hg` will now default to port
`2222` when connecting to this host.
A downside to this approach is that your users will each need to set up their
`~/.ssh/config` files individually.
This file also allows you to define short names for hosts using the `Host` and
-`HostName` options. If you choose to do this, be aware that Phabricator uses
+`HostName` options. If you choose to do this, be aware that Phorge uses
remote/clone URIs to figure out which repository it is operating in, but can
not resolve host aliases defined in your `ssh` config. If you create host
aliases they may break some features related to repository identification.
If you use this approach, you will also need to specify a port explicitly when
connecting to administrate the host. Any unit tests or other build automation
will also need to be configured or use explicit port numbers.
**Port Multiplexing**: If you have hardware access, you can power down the host
and find the network I/O pins on the motherboard (for onboard networking) or
network card.
Carefully strip and solder a short piece of copper wire between the pins for
the external interface `22` and internal `2222`, so the external interface can
receive traffic for both services.
(Make sure not to desolder the existing connection between external `22` and
internal `22` or you won't be able to connect normally to administrate the
host.)
The obvious downside to this approach is that it requires physical access to
the machine, so it won't work if you're hosted on a cloud provider.
SSHD Setup
==========
Now that you've decided how you'll handle port assignment, you're ready to
continue `sshd` setup.
If you plan to connect to a port other than `22`, you should set this port
-as `diffusion.ssh-port` in your Phabricator config:
+as `diffusion.ssh-port` in your Phorge config:
```
$ ./bin/config set diffusion.ssh-port 2222
```
This port is not special, and you are free to choose a different port, provided
you make the appropriate configuration adjustment below.
-**Configure and Start Phabricator SSHD**: Now, you'll configure and start a
-copy of `sshd` which will serve Phabricator services, including repositories,
+**Configure and Start Phorge SSHD**: Now, you'll configure and start a
+copy of `sshd` which will serve Phorge services, including repositories,
over SSH.
This instance will use a special locked-down configuration that uses
-Phabricator to handle authentication and command execution.
+Phorge to handle authentication and command execution.
There are three major steps:
- - Create a `phabricator-ssh-hook.sh` file.
- - Create a `sshd_phabricator` config file.
+ - Create a `phorge-ssh-hook.sh` file.
+ - Create a `sshd_phorge` config file.
- Start a copy of `sshd` using the new configuration.
-**Create `phabricator-ssh-hook.sh`**: Copy the template in
-`phabricator/resources/sshd/phabricator-ssh-hook.sh` to somewhere like
-`/usr/libexec/phabricator-ssh-hook.sh` and edit it to have the correct
+**Create `phorge-ssh-hook.sh`**: Copy the template in
+`phorge/resources/sshd/phorge-ssh-hook.sh` to somewhere like
+`/usr/libexec/phorge-ssh-hook.sh` and edit it to have the correct
settings.
Both the script itself **and** the parent directory the script resides in must
be owned by `root`, and the script must have `755` permissions:
```
$ sudo chown root /path/to/somewhere/
-$ sudo chown root /path/to/somewhere/phabricator-ssh-hook.sh
-$ sudo chmod 755 /path/to/somewhere/phabricator-ssh-hook.sh
+$ sudo chown root /path/to/somewhere/phorge-ssh-hook.sh
+$ sudo chmod 755 /path/to/somewhere/phorge-ssh-hook.sh
```
If you don't do this, `sshd` will refuse to execute the hook.
-**Create `sshd_config` for Phabricator**: Copy the template in
-`phabricator/resources/sshd/sshd_config.phabricator.example` to somewhere like
-`/etc/ssh/sshd_config.phabricator`.
+**Create `sshd_config` for Phorge**: Copy the template in
+`phorge/resources/sshd/sshd_config.phabricator.example` to somewhere like
+`/etc/ssh/sshd_config.phorge`.
Open the file and edit the `AuthorizedKeysCommand`,
`AuthorizedKeysCommandUser`, and `AllowUsers` settings to be correct for your
system.
This configuration file also specifies the `Port` the service should run on.
If you intend to run on a non-default port, adjust it now.
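As a rough sketch, the edited settings in `sshd_config.phorge` might end up
looking like this; the hook path, user name, and port below are examples only,
and the rest of the template should be left as it ships:
```
AuthorizedKeysCommand /usr/libexec/phorge-ssh-hook.sh
AuthorizedKeysCommandUser vcs-user
AllowUsers vcs-user
Port 2222
```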
-**Start SSHD**: Now, start the Phabricator `sshd`:
+**Start SSHD**: Now, start the Phorge `sshd`:
- sudo /path/to/sshd -f /path/to/sshd_config.phabricator
+ sudo /path/to/sshd -f /path/to/sshd_config.phorge
If you did everything correctly, you should be able to run this command:
```
-$ echo {} | ssh vcs-user@phabricator.yourcompany.com conduit conduit.ping
+$ echo {} | ssh vcs-user@phorge.yourcompany.com conduit conduit.ping
```
...and get a response like this:
```lang=json
-{"result":"phabricator.yourcompany.com","error_code":null,"error_info":null}
+{"result":"phorge.yourcompany.com","error_code":null,"error_info":null}
```
If you get an authentication error, make sure you added your public key in
{nav Settings > SSH Public Keys}. If you're having trouble, check the
troubleshooting section below.
Authentication Over SSH
=======================
To authenticate over SSH, users should add their public keys under
{nav Settings > SSH Public Keys}.
Cloning a Repository
====================
If you've already set up a hosted repository, you can try cloning it now. To
do this, browse to the repository's main screen in Diffusion. You should see
clone commands at the top of the page.
To clone the repository, just run the appropriate command.
If you don't see the commands or running them doesn't work, see below for tips
on troubleshooting.
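For reference, the clone commands Diffusion shows usually look something like
the examples below. These URIs are hypothetical; always copy the exact command
from the repository page rather than constructing it by hand.
```
$ git clone ssh://vcs-user@phorge.yourcompany.com/source/myrepo.git
$ hg clone ssh://vcs-user@phorge.yourcompany.com/source/myrepo/
$ svn checkout svn+ssh://vcs-user@phorge.yourcompany.com/source/myrepo
```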
Troubleshooting HTTP
====================
Some general tips for troubleshooting problems with HTTP:
- - Make sure `diffusion.allow-http-auth` is enabled in your Phabricator config.
+ - Make sure `diffusion.allow-http-auth` is enabled in your Phorge config.
- Make sure HTTP serving is enabled for the repository you're trying to
clone. You can find this in {nav Edit Repository > Hosting}.
- Make sure you've configured a VCS password. This is separate from your main
account password. You can configure this in {nav Settings > VCS Password}.
- Make sure the main repository screen in Diffusion shows a clone/checkout
command for HTTP. If it doesn't, something above isn't set up correctly:
double-check your configuration. You should see a `svn checkout http://...`,
`git clone http://...` or `hg clone http://...` command. Run that command
verbatim to clone the repository.
If you're using Git, using `GIT_CURL_VERBOSE` may help assess login failures.
To do so, specify it on the command line before the `git clone` command, like
this:
$ GIT_CURL_VERBOSE=1 git clone ...
This will make `git` print out a lot more information. Particularly, the line
with the HTTP response is likely to be useful:
< HTTP/1.1 403 Invalid credentials.
In many cases, this can give you more information about what's wrong.
Troubleshooting SSH
===================
Some general tips for troubleshooting problems with SSH:
- Check that you've configured `diffusion.ssh-user`.
- Check that you've configured `phd.user`.
- Make sure SSH serving is enabled for the repository you're trying to clone.
You can change this setting from a main repository screen in Diffusion by
{nav Edit Repository >
Edit Hosting >
Host Repository on Phabricator >
Save and Continue >
SSH Read Only or Read/Write >
Save Changes}.
- Make sure you've added an SSH public key to your account. You can do this
in {nav Settings > SSH Public Keys}.
- Make sure the main repository screen in Diffusion shows a clone/checkout
command for SSH. If it doesn't, something above isn't set up correctly.
You should see an `svn checkout svn+ssh://...`, `git clone ssh://...` or
`hg clone ssh://...` command. Run that command verbatim to clone the
repository.
- - Check your `phabricator-ssh-hook.sh` file for proper settings.
- - Check your `sshd_config.phabricator` file for proper settings.
+ - Check your `phorge-ssh-hook.sh` file for proper settings.
+ - Check your `sshd_config.phorge` file for proper settings.
To troubleshoot SSH setup: connect to the server with `ssh`, without running a
command. You may need to use the `-T` flag, and will need to use `-p` if you
are running on a nonstandard port. You should see a message like this one:
- $ ssh -T -p 2222 vcs-user@phabricator.yourcompany.com
- phabricator-ssh-exec: Welcome to Phabricator.
+ $ ssh -T -p 2222 vcs-user@phorge.yourcompany.com
+ phorge-ssh-exec: Welcome to Phorge.
You are logged in as alincoln.
You haven't specified a command to run. This means you're requesting an
- interactive shell, but Phabricator does not provide an interactive shell over
+ interactive shell, but Phorge does not provide an interactive shell over
SSH.
Usually, you should run a command like `git clone` or `hg push` rather than
connecting directly with SSH.
Supported commands are: conduit, git-receive-pack, git-upload-pack, hg,
svnserve.
If you see this message, all your SSH stuff is configured correctly. **If you
get a login shell instead, you've missed some major setup step: review the
documentation above.** If you get some other sort of error, double check these
settings:
- You're connecting as the `vcs-user`.
- The `vcs-user` has `NP` in `/etc/shadow`.
- The `vcs-user` has `/bin/sh` or some other valid shell in `/etc/passwd`.
- Your SSH private key is correct, and you've added the corresponding
- public key to Phabricator in the Settings panel.
+ public key to Phorge in the Settings panel.
If you can get this far, but can't execute VCS commands like `git clone`, there
is probably an issue with your `sudoers` configuration. Check:
- Your `sudoers` file is set up as instructed above.
- You've commented out `Defaults requiretty` in `sudoers`.
- You don't have multiple copies of the VCS binaries (like `git-upload-pack`)
on your system. You may have granted sudo access to one, while the VCS user
is trying to run a different one.
- You've configured `phd.user`.
- The `phd.user` has read and write access to the repositories.
It may also be helpful to run `sshd` in debug mode:
- $ /path/to/sshd -d -d -d -f /path/to/sshd_config.phabricator
+ $ /path/to/sshd -d -d -d -f /path/to/sshd_config.phorge
This will run it in the foreground and emit a large amount of debugging
information when you connect to it.
Finally, you can usually test that `sudoers` is configured correctly by
doing something like this:
$ su vcs-user
$ sudo -E -n -u daemon-user -- /path/to/some/vcs-binary --help
That will try to run the binary via `sudo` in a manner similar to the way that
-Phabricator will run it. This can give you better error messages about issues
+Phorge will run it. This can give you better error messages about issues
with `sudoers` configuration.
Miscellaneous Troubleshooting
=============================
- If you're getting an error about `svnlook` not being found, add the path
- where `svnlook` is located to the Phabricator configuration
+ where `svnlook` is located to the Phorge configuration
`environment.append-paths` (even if it already appears in PATH). This issue
is caused by SVN wiping the environment (including PATH) when invoking
commit hooks.
Moving the sshd Port
====================
If you want to move the standard (administrative) `sshd` to a different port to
-make Phabricator repository URIs cleaner, this section has some tips.
+make Phorge repository URIs cleaner, this section has some tips.
This is optional, and it is normally easier to do this by putting a load
-balancer in front of Phabricator and having it accept TCP traffic on port 22
+balancer in front of Phorge and having it accept TCP traffic on port 22
and forward it to some other port.
When moving `sshd`, be careful when editing the configuration. If you get it
wrong, you may lock yourself out of the machine. Restarting `sshd` generally
will not interrupt existing connections, but you should exercise caution. Two
strategies you can use to mitigate this risk are: smoke-test configuration by
starting a second `sshd`; and use a `screen` session which automatically
repairs configuration unless stopped.
To smoke-test a configuration, just start another `sshd` using the `-f` flag:
sudo /path/to/sshd -f /path/to/config_file.edited
You can then connect and make sure the edited config file is valid before
replacing your primary configuration file.
To automatically repair configuration, start a `screen` session with a command
like this in it:
sleep 60 ; mv sshd_config.good sshd_config ; /etc/init.d/sshd restart
The specific command may vary for your system, but the general idea is to have
the machine automatically restore configuration after some period of time if
you don't stop it. If you lock yourself out, this can fix things automatically.
Now that you're ready to edit your configuration, open up your `sshd` config
(often `/etc/ssh/sshd_config`) and change the `Port` setting to some other port,
like `222` (you can choose any port other than 22).
Port 222
Very carefully, restart `sshd`. Verify that you can connect on the new port:
ssh -p 222 ...
-Now you can move the Phabricator `sshd` to port 22, then adjust the value
-for `diffusion.ssh-port` in your Phabricator configuration.
+Now you can move the Phorge `sshd` to port 22, then adjust the value
+for `diffusion.ssh-port` in your Phorge configuration.
No Direct Pushes
================
You may get an error about "No Direct Pushes" when trying to push. This means
you are pushing directly to the repository instead of pushing through
-Phabricator. This is not supported: writes to hosted repositories must go
-through Phabricator so it can perform authentication, enforce permissions,
+Phorge. This is not supported: writes to hosted repositories must go
+through Phorge so it can perform authentication, enforce permissions,
write logs, proxy requests, apply rewriting, etc.
One way to do a direct push by mistake is to use a `file:///` URI to interact
with the repository from the same machine. This is not supported. Instead, use
one of the repository URIs provided in the web interface, even if you're
working on the same machine.
Another way to do a direct push is to misconfigure SSH (or not configure it at
all) so that none of the logic described above runs and you just connect
normally as a system user. In this case, the `ssh` test described above will
fail (you'll get a command prompt when you connect, instead of the message you
are supposed to get, as described above).
If you encounter this error: make sure you're using a remote URI given to
you by Diffusion in the web interface, then run through the troubleshooting
steps above carefully.
Sometimes users encounter this problem because they skip this whole document
assuming they don't need to configure anything. This will not work, and you
MUST configure things as described above for hosted repositories to work.
The technical reason this error occurs is that the `PHABRICATOR_USER` variable
is not defined in the environment when commit hooks run. This variable is set
-by Phabricator when a request passes through the authentication layer that this
+by Phorge when a request passes through the authentication layer that this
document provides instructions for configuring. Its absence indicates that the
-request did not pass through Phabricator.
+request did not pass through Phorge.
Next Steps
==========
Once hosted repositories are set up:
- learn about commit hooks with @{article:Diffusion User Guide: Commit Hooks}.
diff --git a/src/docs/user/userguide/diffusion_managing.diviner b/src/docs/user/userguide/diffusion_managing.diviner
index e3743526e9..c0b97cb53c 100644
--- a/src/docs/user/userguide/diffusion_managing.diviner
+++ b/src/docs/user/userguide/diffusion_managing.diviner
@@ -1,450 +1,450 @@
@title Diffusion User Guide: Managing Repositories
@group userguide
Guide to configuring and managing repositories in Diffusion.
Overview
========
After you create a new repository in Diffusion or select **Manage Repository**
from the main screen of an existing repository, you'll be taken to the
repository management interface for that repository.
On this interface, you'll find many options which allow you to configure the
behavior of a repository. This document walks through the options.
Basics
======
The **Basics** section of the management interface allows you to configure
the repository name, description, and identifiers. You can also activate or
deactivate the repository here, and configure a few other miscellaneous
settings.
Basics: Name
============
The repository name is a human-readable primary name for the repository. It
does not need to be unique.
Because the name is not unique and does not have any meaningful restrictions,
it's fairly ambiguous and isn't very useful as an identifier. The other basic
information (primarily callsigns and short names) gives you control over
repository identifiers.
Basics: Callsigns
=================
Each repository can optionally be identified by a "callsign", which is a short
-uppercase string like "P" (for Phabricator) or "ARC" (for Arcanist).
+uppercase string like "P" (for Phorge) or "ARC" (for Arcanist).
The primary goal of callsigns is to namespace commits to SVN repositories: if
you use multiple SVN repositories, each repository has a revision 1, revision 2,
etc., so referring to them by number alone is ambiguous.
However, even for Git and Mercurial they impart additional information to human
readers and allow parsers to detect that something is a commit name with high
probability (and allow distinguishing between multiple copies of a repository).
Configuring a callsign can make interacting with a commonly-used repository
easier, but you may not want to bother assigning one to every repository if you
have some similar, templated, or rarely-used repositories.
If you choose to assign a callsign to a repository, it must be unique within an
install but does not need to be globally unique, so you are free to use the
single-letter callsigns for brevity. For example, Facebook uses "E" for the
Engineering repository, "O" for the Ops repository, "Y" for a Yum package
-repository, and so on, while Phabricator uses "P" and Arcanist uses "ARC".
+repository, and so on, while Phorge uses "P" and Arcanist uses "ARC".
Keeping callsigns brief will make them easier to use, and the use of
one-character callsigns is encouraged if they are reasonably evocative.
-If you configure a callsign like `XYZ`, Phabricator will activate callsign URIs
+If you configure a callsign like `XYZ`, Phorge will activate callsign URIs
and activate the callsign identifier (like `rXYZ`) for the repository. These
more human-readable identifiers can make things a little easier to interact
with.
Basics: Short Name
==================
Each repository can optionally have a unique short name. Short names must be
unique and have some minor restrictions to make sure they are unambiguous and
appropriate for use as directory names and in URIs.
Basics: Description
===================
You may optionally provide a brief (or, at your discretion, excruciatingly
long) human-readable description of the repository. This description will be
shown on the main repository page.
You can also create a `README` file at the repository root (or in any
subdirectory) to provide information about the repository. These formats are
supported:
| File Name | Rendered As...
|-------------------|---------------
| `README` | Plain Text
| `README.txt` | Plain Text
| `README.remarkup` | Remarkup
| `README.md` | Remarkup
| `README.rainbow` | Rainbow
Basics: Encoding
================
Before content from the repository can be shown in the web UI or embedded in
other contexts like email, it must be converted to UTF-8.
Most source code is written in UTF-8 or a subset of UTF-8 (like plain ASCII)
already, so everything will work fine. The majority of repositories do not need
to adjust this setting.
If your repository is primarily written in some other encoding, specify it here
-so Phabricator can convert from it properly when reading content to embed in
+so Phorge can convert from it properly when reading content to embed in
a webpage or email.
Basics: Dangerous Changes
=========================
By default, repositories are protected against dangerous changes. Dangerous
changes are operations which rewrite or destroy repository history (for
example, by deleting or rewriting branches). Normally, these take the form
of `git push --force` or similar.
It is normally a good idea to leave this protection enabled because most
scalable workflows rarely rewrite repository history and it's easy to make
mistakes which are expensive to correct if this protection is disabled.
If you do occasionally need to rewrite published history, you can treat this
option like a safety: disable it, perform required rewrites, then enable it
again.
If you fully disable this at the repository level, you can still use Herald to
selectively protect certain branches or grant this power to a limited set of
users.
This option is only available in Git and Mercurial, because it is impossible
to make dangerous changes in Subversion.
-This option has no effect if a repository is not hosted because Phabricator
+This option has no effect if a repository is not hosted because Phorge
can not prevent dangerous changes in a remote repository it is merely
observing.
Basics: Disable Publishing
==========================
You can disable publishing for a repository. For more details on what this
means, see @{article:Diffusion User Guide: Permanent Refs}.
This is primarily useful if you need to perform major maintenance on a
repository (like rewriting a large part of the repository history) and you
don't want the maintenance to generate a large volume of email and
notifications. You can disable publishing, apply major changes, wait for the
new changes to import, and then reactivate publishing.
Basics: Deactivate Repository
=============================
Repositories can be deactivated. Deactivating a repository has these effects:
- the repository will no longer be updated;
- users will no longer be able to clone/fetch/checkout the repository;
- users will no longer be able to push to the repository; and
- the repository will be hidden from view in default queries.
When repositories are created for the first time, they are deactivated. This
gives you an opportunity to customize settings, like adjusting policies or
configuring a URI to observe. You must activate a repository before it will
start working normally.
Basics: Delete Repository
=========================
Repositories can not be deleted from the web UI, so this option only gives you
information about how to delete a repository.
Repositories can only be deleted from the command line, with `bin/remove`:
```
$ ./bin/remove destroy <repository>
```
This command will permanently destroy the repository. For more information
about destroying things, see @{article:Permanently Destroying Data}.
Policies
========
The **Policies** section of the management interface allows you to review and
manage repository access policies.
You can configure granular access policies for each repository to control who
can view, clone, administrate, and push to the repository.
Policies: View
==============
The view policy for a repository controls who can view the repository from
-the web UI and clone, fetch, or check it out from Phabricator.
+the web UI and clone, fetch, or check it out from Phorge.
Users who can view a repository can also access the "Manage" interface to
review information about the repository and examine the edit history, but can
not make any changes.
Policies: Edit
==============
The edit policy for a repository controls who can change repository settings
using the "Manage" interface. In essence, this is permission to administrate
the repository.
You must be able to view a repository to edit it.
You do not need this permission to push changes to a repository.
Policies: Push
==============
The push policy for a repository controls who can push changes to the
repository.
-This policy has no effect if Phabricator is not hosting the repository, because
+This policy has no effect if Phorge is not hosting the repository, because
it can not control who is allowed to make changes to a remote repository it is
merely observing.
You must also be able to view a repository to push to it.
You do not need to be able to edit a repository to push to it.
Further restrictions on who can push (and what they can push) can be configured
for hosted repositories with Herald, which allows you to write more
-sophisticated rules that evaluate when Phabricator receives a push. To get
+sophisticated rules that evaluate when Phorge receives a push. To get
started with Herald, see @{article:Herald User Guide}.
Additionally, Git and Mercurial repositories have a setting which allows
you to **Prevent Dangerous Changes**. This setting is enabled by default and
will prevent any users from pushing changes which rewrite or destroy history.
URIs
====
-The **URIs** panel allows you to add and manage URIs which Phabricator will
+The **URIs** panel allows you to add and manage URIs which Phorge will
fetch from, serve from, and push to.
These options are covered in detail in @{article:Diffusion User Guide: URIs}.
Limits
======
The **Limits** panel allows you to configure limits and timeouts.
**Filesize Limit**: Allows you to set a maximum filesize for any file in the
repository. If a commit creates a larger file (or modifies an existing file so
it becomes too large) it will be rejected. This option only applies to hosted
repositories.
This limit is primarily intended to make it more difficult to accidentally push
very large files that shouldn't be version controlled (like logs, binaries,
machine learning data, or media assets). Pushing huge datafiles by mistake can
make the repository unwieldy by dramatically increasing how much data must be
transferred over the network to clone it, and simply reverting the changes
doesn't reduce the impact of this kind of mistake.
**Clone/Fetch Timeout**: Configure the internal timeout for creating copies
of this repository during operations like intracluster synchronization and
Drydock working copy construction. This timeout does not affect external
users.
**Touch Limit**: Apply a limit to the maximum number of paths that any commit
may touch. If a commit affects more paths than this limit, it will be rejected.
This option only applies to hosted repositories. Users may work around this
limit by breaking the commit into several smaller commits which each affect
fewer paths.
This limit is intended to offer a guard rail against users making silly
mistakes that create obviously mistaken changes, like copying an entire
repository into itself and pushing the result. This kind of change can take
some effort to clean up if it becomes part of repository history.
Note that if you move a file, both the old and new locations count as touched
paths. You should generally configure this limit to be more than twice the
number of files you anticipate any user ever legitimately wanting to move in
a single commit. For example, a limit of `20000` will let users move up to
10,000 files in a single commit, but will reject users mistakenly trying to
push a copy of another repository or a directory with a million logfiles or
whatever other kind of creative nonsense they manage to dream up.
Branches
========
-The **Branches** panel allows you to configure how Phabricator interacts with
+The **Branches** panel allows you to configure how Phorge interacts with
branches.
This panel is not available for Subversion repositories, because Subversion
does not have formal branches.
You can configure a **Default Branch**. This controls which branch is shown by
-default in the UI. If no branch is provided, Phabricator will use `master` in
+default in the UI. If no branch is provided, Phorge will use `master` in
Git and `default` in Mercurial.
**Fetch Refs**: In Git, if you are observing a remote repository, you can
specify that you only want to fetch a subset of refs using "Fetch Refs".
Normally, all refs (`refs/*`) are fetched. This means all branches, all tags,
and all other refs.
If you want to fetch only a few specific branches, you can list only those
branches. For example, this will fetch only the branch "master":
```
refs/heads/master
```
You can fetch all branches and tags (but ignore other refs) like this:
```
refs/heads/*
refs/tags/*
```
This may be useful if the remote is on a service like GitHub, GitLab, or
Gerrit and uses custom refs (like `refs/pull/` or `refs/changes/`) to store
-metadata that you don't want to bring into Phabricator.
+metadata that you don't want to bring into Phorge.
**Permanent Refs**: To learn more about permanent refs, see:
- @{article:Diffusion User Guide: Permanent Refs}
-By default, Phabricator considers all branches to be permanent refs. If you
+By default, Phorge considers all branches to be permanent refs. If you
only want some branches to be treated as permanent refs, specify them here.
When specifying branches, you should enter one branch name per line. You can
use regular expressions to match branches by wrapping an expression in
`regexp(...)`. For example:
| Example | Effect |
|---------|--------|
| `master` | Only the `master` branch is a permanent ref.
| `regexp(/^release-/)` | Branches are permanent if they start with `release-`.
| `regexp(/^(?!temp-)/)` | Branches starting with `temp-` are not permanent.
Staging Area
============
The **Staging Area** panel configures staging areas, used to make proposed
changes available to build and continuous integration systems.
For more details, see @{article:Harbormaster User Guide}.
Automation
==========
-The **Automation** panel configures support for allowing Phabricator to make
+The **Automation** panel configures support for allowing Phorge to make
writes directly to the repository, so that it can perform operations like
automatically landing revisions from the web UI.
For details on repository automation, see
@{article:Drydock User Guide: Repository Automation}.
Symbols
=======
The **Symbols** panel allows you to customize how symbols (like class and
function names) are linked when viewing code in the repository, and when
viewing revisions which propose code changes to the repository.
To take advantage of this feature, you need to do additional work to build
symbol indexes. For details on configuring and populating symbol indexes, see
@{article:User Guide: Symbol Indexes}.
Repository Identifiers and Names
================================
Repositories have several short identifiers which you can use to refer to the
repository. For example, if you use command-line administrative tools to
interact with a repository, you'll provide one of these identifiers:
```
$ ./bin/repository update <identifier>
```
The identifiers available for a repository depend on which options are
configured. Each repository may have several identifiers:
- An **ID** identifier, like `R123`. This is available for all repositories.
- A **callsign** identifier, like `rXY`. This is available for repositories
with a callsign.
- A **short name** identifier, like `xylophone`. This is available for
repositories with a short name.
All three identifiers can be used to refer to the repository in cases where
the intent is unambiguous, but only the first two forms work in ambiguous
contexts.
-For example, if you type `R123` or `rXY` into a comment, Phabricator will
+For example, if you type `R123` or `rXY` into a comment, Phorge will
recognize them as references to the repository. If you type `xylophone`, it
assumes you mean the word "xylophone".
Only the `R123` identifier is immutable: the others can be changed later by
adjusting the callsign or short name for the repository.
Commit Identifiers
==================
Diffusion uses repository identifiers and information about the commit itself
to generate globally unique identifiers for each commit, like `rE12345`.
Each commit may have several identifiers:
- A repository **ID** identifier, like `R123:abcdef123...`.
- A repository **callsign** identifier, like `rXYZabcdef123...`. This only
works if a repository has a callsign.
- Any unique prefix of the commit hash.
-Git and Mercurial use commit hashes to identify commits, and Phabricator will
+Git and Mercurial use commit hashes to identify commits, and Phorge will
recognize a commit if the hash prefix is unique and sufficiently long. Commit
hashes qualified with a repository identifier must be at least 5 characters
long; unqualified commit hashes must be at least 7 characters long.
In Subversion, commit identifiers are sequential integers and prefixes can not
be used to identify them.
-When rendering the name of a Git or Mercurial commit hash, Phabricator tends to
+When rendering the name of a Git or Mercurial commit hash, Phorge tends to
shorten it to 12 characters. This "short length" is relatively long compared to
Git itself (which often uses 7 characters). See this post on the LKML for a
historical explanation of Git's occasional internal use of 7-character hashes:
https://lkml.org/lkml/2010/10/28/287
Because 7-character hashes are likely to collide for even moderately large
repositories, Diffusion generally uses either a 12-character prefix (which makes
collisions very unlikely) or the full 40-character hash (which makes collisions
astronomically unlikely).
Next Steps
==========
Continue by:
- returning to the @{article:Diffusion User Guide}.
diff --git a/src/docs/user/userguide/diffusion_permanent.diviner b/src/docs/user/userguide/diffusion_permanent.diviner
index fba4341c0c..f28e3259f7 100644
--- a/src/docs/user/userguide/diffusion_permanent.diviner
+++ b/src/docs/user/userguide/diffusion_permanent.diviner
@@ -1,81 +1,81 @@
@title Diffusion User Guide: Permanent Refs
@group userguide
Explains when Diffusion will take actions in response to discovering commits.
Overview
========
Diffusion can close tasks and revisions and take other actions when commits
-appear in a repository (either because they were pushed to Phabricator, or
-because they were pushed to some remote which Phabricator is observing).
+appear in a repository (either because they were pushed to Phorge, or
+because they were pushed to some remote which Phorge is observing).
This document explains when Diffusion acts on commits and how to configure this
behavior.
Publishing Commits
==================
Diffusion distinguishes between "pushed" and "published" commits.
Not all commits that are pushed to a repository are destined for greatness:
for example, many tools push temporary commits to secret places like
`refs/pull/123`, `refs/notes/*`, or `refs/changes/12/345678/1`.
Sometimes, human users intentionally push changes to branches like
-"tmp-hack-ignore-123". This is formally discouraged by Phabricator, but the
+"tmp-hack-ignore-123". This is formally discouraged by Phorge, but the
practice is so widespread that we've given up trying to stop anyone from doing
it.
-Phabricator will import these commits and create pages for them so you can view
+Phorge will import these commits and create pages for them so you can view
them in the web UI and link to them, but does not take any other actions until
they are "published".
A commit is "published" when it becomes reachable from a permanent ref. By
default, all branches are permanent refs, so pushing a commit to "master" will
publish it, but pushing a commit to `refs/pull/123` (either directly, or by
using a tool like GitHub) will not.
Usually, commits are published by pushing them directly to a permanent branch
like "master", or by merging a temporary branch into a permanent branch.
-When a commit is published, Phabricator acts on it and:
+When a commit is published, Phorge acts on it and:
- sends email;
- delivers notifications;
- publishes a feed story;
- triggers Audits;
- runs Herald rules;
- updates mentioned objects;
- closes referenced tasks; and
- closes associated revisions.
Configuring Repositories
========================
You can control publishing behavior in two primary ways: by configuring
which refs are considered to be permanent refs, and by disabling publishing
entirely.
By default, all branches are considered permanent refs and all other refs
(including tags and other arbitrary custom refs) are considered nonpermanent.
This means that, by default, pushing commits to a branch like
"tmp-hack-ignore-123" will publish those commits.
If you want to be free to push commits to temporary branches like this and
only want commits on certain branches (like "master") to be published,
configure which refs are treated as permanent by editing
{nav Branches > Permanent Refs} from the "Manage" page of the repository.
To disable publishing entirely, select {nav Basics > Disable Publishing}.
Next Steps
==========
Continue by:
- troubleshooting in greater depth with
@{article:Troubleshooting Repository Imports}.
diff --git a/src/docs/user/userguide/diffusion_symbols.diviner b/src/docs/user/userguide/diffusion_symbols.diviner
index 7d14ad92b2..6d6cc8847b 100644
--- a/src/docs/user/userguide/diffusion_symbols.diviner
+++ b/src/docs/user/userguide/diffusion_symbols.diviner
@@ -1,97 +1,97 @@
@title Diffusion User Guide: Symbol Indexes
@group userguide
Guide to configuring and using the symbol index.
= Overview =
-Phabricator can maintain a symbol index, which keeps track of where classes
+Phorge can maintain a symbol index, which keeps track of where classes
and functions are defined in the codebase. Once you set up indexing, you can
use the index to do things like:
- jump to symbol definitions from Differential code reviews and Diffusion
code browsing by ctrl-clicking (cmd-click on Mac) symbols
- search for symbols from the quick-search
- let the IRC bot answer questions like "Where is SomeClass?"
NOTE: Because this feature depends on the syntax highlighter, it will work
better for some languages than others. It currently works fairly well for PHP,
but your mileage may vary for other languages.
= Populating the Index =
To populate the index, you need to write a script which identifies symbols in
your codebase and set up a cronjob which pipes its output to:
./scripts/symbols/import_repository_symbols.php
-Phabricator includes a script which can identify symbols in PHP projects:
+Phorge includes a script which can identify symbols in PHP projects:
./scripts/symbols/generate_php_symbols.php
-Phabricator also includes a script which can identify symbols in any
+Phorge also includes a script which can identify symbols in any
programming language that has classes and/or functions, and is supported by
Exuberant Ctags (http://ctags.sourceforge.net):
./scripts/symbols/generate_ctags_symbols.php
If you want to identify symbols from another language, you need to write a
script which can export them (for example, maybe by parsing a `ctags` file).
The output format of the script should be one symbol per line:
<context> <name> <type> <lang> <line> <path>
For example:
ExampleClass exampleMethod function php 13 /src/classes/ExampleClass.php
Context is, broadly speaking, the scope or namespace where the symbol is
defined. For object-oriented languages, this is probably a class name. The
symbols with that context are class constants, methods, properties, nested
classes, etc. When printing symbols without a context (those that are defined
globally, for instance), the `<context>` field should be empty (that is, the
line should start with a space).
Your script should enumerate all the symbols in your project, and provide paths
from the project root (where ".arcconfig" is) beginning with a "/".
You can look at `generate_php_symbols.php` for an example of how you might
write such a script, and run this command to see its output:
- $ cd phabricator/
+ $ cd phorge/
$ find . -type f -name '*.php' | ./scripts/symbols/generate_php_symbols.php
To actually build the symbol index, pipe this data to the
`import_repository_symbols.php` script, providing the repository callsign:
$ ./scripts/symbols/import_repository_symbols.php REPO < symbols_data
Then just set up a cronjob to run that however often you like.
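A hedged sketch of such a cronjob, assuming the install lives at
`/path/to/phorge`, the working copy to index is at `/path/to/project`, and the
repository uses the callsign `REPO`:
```
# Re-index PHP symbols nightly at 03:00.
0 3 * * * cd /path/to/project && find . -type f -name '*.php' | /path/to/phorge/scripts/symbols/generate_php_symbols.php | /path/to/phorge/scripts/symbols/import_repository_symbols.php REPO
```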
You can test that the import worked by querying for symbols using the Conduit
method `diffusion.findsymbols`. Some features (like that method, and the
IRC bot integration) will start working immediately. Others will require more
configuration.
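For example, you can reuse the Conduit-over-SSH pattern from the hosting
guide. This assumes SSH access is configured; check the Conduit console on
your install for the exact parameters the method accepts.
```
$ echo '{"name": "ExampleClass"}' | ssh vcs-user@phorge.yourcompany.com conduit diffusion.findsymbols
```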
= Advanced Configuration =
You can configure some more options by going to {nav Diffusion > (Select
repository) > Edit Repository > Edit Symbols}, and filling out these fields:
- **Indexed Languages**: Fill in all the languages you've built indexes for.
You can leave this blank for "All languages".
- **Uses Symbols From**: If this project depends on other repositories, add
the other repositories which symbols should be looked for here. For example,
- Phabricator lists "Arcanist" because it uses classes and functions defined
+ Phorge lists "Arcanist" because it uses classes and functions defined
in `arcanist/`.
== External Symbols ==
-By @{article@phabcontrib:Adding New Classes}, you can teach Phabricator
+By following @{article@contrib:Adding New Classes}, you can teach Phorge
about symbols from the outside world.
Extend @{class:DiffusionExternalSymbolsSource}; once loaded, your new
implementation will be used any time a symbol is queried.
See @{class:DiffusionPhpExternalSymbolsSource} and
@{class:DiffusionPythonExternalSymbolsSource} for example implementations.
diff --git a/src/docs/user/userguide/diffusion_updates.diviner b/src/docs/user/userguide/diffusion_updates.diviner
index 7012e6228e..dc3c7dfef1 100644
--- a/src/docs/user/userguide/diffusion_updates.diviner
+++ b/src/docs/user/userguide/diffusion_updates.diviner
@@ -1,123 +1,123 @@
@title Diffusion User Guide: Repository Updates
@group userguide
Explains how Diffusion updates repositories to discover new changes.
Overview
========
-When Phabricator is configured to import repositories which are hosted
+When Phorge is configured to import repositories which are hosted
elsewhere, it needs to poll those repositories for changes. If it polls too
frequently, it can create too much load locally and on remote services. If it
polls too rarely, it may take a long time for commits to show up in the web
interface.
This document describes the rules around polling and how to understand and
adjust the behavior. In general:
- - Phabricator chooses a default poll interval based on repository
+ - Phorge chooses a default poll interval based on repository
activity. These intervals range from every 15 seconds (for active
repositories) to every 6 hours (for repositories with no commits in two
months).
- - If you use `arc` to push commits, or you host repositories on Phabricator,
+ - If you use `arc` to push commits, or you host repositories on Phorge,
repositories automatically update after changes are pushed.
- If you don't use `arc` and your repository is hosted elsewhere, this
document describes ways you can make polling more responsive.
Default Behavior
================
-By default, Phabricator determines how frequently to poll repositories by
+By default, Phorge determines how frequently to poll repositories by
examining how long it has been since the last commit. In most cases this is
fairly accurate and produces good behavior. In particular, it automatically
reduces the polling frequency for rarely-used repositories. This dramatically
reduces load for installs with a large number of inactive repositories, which
is common.
For repositories with activity in the last 3 days, we wait 1 second for every
10 minutes without activity. The table below has some examples.
| Time Since Commit | Poll Interval |
|-------------------|------------------|
| //Minimum// | 15 seconds |
| 6h | about 30 seconds |
| 12h | about 1 minute |
| 1 day | about 2 minutes |
| 2 days | about 5 minutes |
| 3 days | about 7 minutes |
This means that you may need to wait about 2 minutes for the first commit to
be imported in the morning, and about 5 minutes after a long weekend, but other
commits to active repositories should usually be recognized in 30 seconds or
less.
For repositories with no activity in the last 3 days, we wait longer between
updates (1 second for every 4 minutes without activity). The table below has
some examples.
| Time Since Commit | Poll Interval |
|-------------------|------------------|
| 4 days | about 30 minutes |
| 7 days | about 45 minutes |
| 10 days | about 1 hour |
| 20 days | about 2 hours |
| 30 days | about 3 hours |
| //Maximum// | 6 hours |
You can find the exact default poll frequency of a repository in
Diffusion > (Choose a Repository) > Edit Repository, under "Update Frequency".
You can also see the time when the repository was last updated in this
interface.
Repositories that are currently importing are always updated at the minimum
update frequency so the import finishes as quickly as possible.
Triggering Repository Updates
=============================
-If you want Phabricator to update a repository more quickly than the default
+If you want Phorge to update a repository more quickly than the default
update frequency (for example, because you just pushed a commit to it), you can
-tell Phabricator that it should schedule an update as soon as possible.
+tell Phorge that it should schedule an update as soon as possible.
There are several ways to do this:
- If you push changes with `arc land` or `arc commit`, this will be done
for you automatically. These commits should normally be recognized within
a few seconds.
- - If your repository is hosted on Phabricator, this will also be done for you
+ - If your repository is hosted on Phorge, this will also be done for you
automatically.
- You can schedule an update from the web interface, in Diffusion >
(Choose a Repository) > Manage Repository > Status > Update Now.
- You can make a call to the Conduit API method `diffusion.looksoon`. This
- hints to Phabricator that it should poll a repository as soon as it can.
+ hints to Phorge that it should poll a repository as soon as it can.
All of the other mechanisms do this under the hood.
In particular, you may be able to add a commit hook to your external repository
which calls `diffusion.looksoon`. This should make an external repository about
as responsive as a hosted repository.
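For example, a Git `post-receive` hook on the external host could notify your
install through `arc call-conduit`. This is only a minimal sketch: the callsign
`XYZ` is hypothetical, it assumes `arc` is installed and authenticated on that
host, and you can confirm the exact parameters in the Conduit console.
  #!/bin/sh
  # post-receive hook: hint that repository "XYZ" should be polled soon.
  echo '{"repositories": ["XYZ"]}' | arc call-conduit -- diffusion.looksoon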
If a repository has an update scheduled, the Diffusion > (Choose a
Repository) > Edit Repository interface will show that the repository is
prioritized and will be updated soon.
Troubleshooting Updates
=======================
You can manually run a repository update from the command line to troubleshoot
issues, using the `--trace` flag to get full details:
- phabricator/ $ ./bin/repository update --trace <repository>
+ phorge/ $ ./bin/repository update --trace <repository>
To catch potential issues with permissions, run this command as the same user
that the daemons run as.
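For example, if the daemons run as a dedicated `phd` user (the account name is
hypothetical; substitute whatever user actually runs your daemons):
  phorge/ $ sudo -u phd ./bin/repository update --trace <repository>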
Next Steps
==========
Continue by:
- troubleshooting in greater depth with
@{article:Troubleshooting Repository Imports}.
diff --git a/src/docs/user/userguide/diffusion_uris.diviner b/src/docs/user/userguide/diffusion_uris.diviner
index a19d38d3a5..1e186ce6ee 100644
--- a/src/docs/user/userguide/diffusion_uris.diviner
+++ b/src/docs/user/userguide/diffusion_uris.diviner
@@ -1,319 +1,319 @@
@title Diffusion User Guide: URIs
@group userguide
Guide to configuring repository URIs for fetching, cloning and mirroring.
Overview
========
-Phabricator can create, host, observe, mirror, proxy, and import repositories.
+Phorge can create, host, observe, mirror, proxy, and import repositories.
For example, you can:
-**Host Repositories**: Phabricator can host repositories locally. Phabricator
+**Host Repositories**: Phorge can host repositories locally. Phorge
maintains the writable master version of the repository, and you can push and
pull the repository. This is the most straightforward kind of repository
configuration, and similar to repositories on other services like GitHub or
Bitbucket.
-**Observe Repositories**: Phabricator can create a copy of an repository which
+**Observe Repositories**: Phorge can create a copy of a repository which
is hosted elsewhere (like GitHub or Bitbucket) and track updates to the remote
-repository. This will create a read-only copy of the repository in Phabricator.
+repository. This will create a read-only copy of the repository in Phorge.
-**Mirror Repositories**: Phabricator can publish any repository to mirrors,
+**Mirror Repositories**: Phorge can publish any repository to mirrors,
overwriting them with an exact copy of the repository that stays up to date as
-the source changes. This works with both local repositories that Phabricator is
-hosting and remote repositories that Phabricator is observing.
+the source changes. This works with both local repositories that Phorge is
+hosting and remote repositories that Phorge is observing.
**Proxy Repositories**: If you are observing a repository, you can allow users
-to read Phabricator's copy of the repository. Phabricator supports granular
+to read Phorge's copy of the repository. Phorge supports granular
read permissions, so this can let you open a private repository up a little
bit in a flexible way.
**Import Repositories**: If you have a repository elsewhere that you want to
-host on Phabricator, you can observe the remote repository first, then turn
+host on Phorge, you can observe the remote repository first, then turn
the tracking off once the repository fully synchronizes. This allows you to
-copy an existing repository and begin hosting it in Phabricator.
+copy an existing repository and begin hosting it in Phorge.
You can also import repositories by creating an empty hosted repository and
then pushing everything to the repository directly.
-You configure the behavior of a Phabricator repository by adding and
+You configure the behavior of a Phorge repository by adding and
configuring URIs and marking them to be fetched from, mirrored to, clonable,
and so on. By configuring all the URIs that a repository should interact with
and expose to users, you configure the read, write, and mirroring behavior
of the repository.
The remainder of this document walks through this configuration in greater
detail.
Host a Repository
=================
-You can create new repositories that Phabricator will host, like you would
-create repositories on services like GitHub or Bitbucket. Phabricator will
-serve a read-write copy of the repository and you can clone it from Phabricator
-and push changes to Phabricator.
+You can create new repositories that Phorge will host, like you would
+create repositories on services like GitHub or Bitbucket. Phorge will
+serve a read-write copy of the repository and you can clone it from Phorge
+and push changes to Phorge.
-If you haven't already, you may need to configure Phabricator for hosting
+If you haven't already, you may need to configure Phorge for hosting
before you can create your first hosted repository. For a detailed guide,
see @{article:Diffusion User Guide: Repository Hosting}.
This is the default mode for new repositories. To host a repository:
- Create a new repository.
- Activate it.
-Phabricator will create an empty repository and allow you to fetch from it and
+Phorge will create an empty repository and allow you to fetch from it and
push to it.
Observe a Repository
====================
If you have an existing repository hosted on another service (like GitHub,
-Bitbucket, or a private server) that you want to work with in Phabricator,
-you can configure Phabricator to observe it.
+Bitbucket, or a private server) that you want to work with in Phorge,
+you can configure Phorge to observe it.
-When observing a repository, Phabricator will keep track of changes in the
+When observing a repository, Phorge will keep track of changes in the
remote repository and allow you to browse and interact with the repository from
the web UI in Diffusion and other applications, but you can continue hosting it
elsewhere.
To observe a repository:
- Create a new repository, but don't activate it yet.
- Add the remote URI you want to observe as a repository URI.
- Set the **I/O Type** for the URI to **Observe**.
- If necessary, configure a credential.
- Activate the repository.
-Phabricator will perform an initial import of the repository, creating a local
+Phorge will perform an initial import of the repository, creating a local
read-only copy. Once this process completes, it will continue keeping track of
changes in the remote, fetching them, and reflecting them in the UI.
Mirror a Repository
===================
NOTE: Mirroring is not supported in Subversion.
-You can create a read-only mirror of an existing repository. Phabricator will
+You can create a read-only mirror of an existing repository. Phorge will
continuously publish the state of the source repository to the mirror, creating
an exact copy.
-For example, if you have a repository hosted in Phabricator that you want to
-mirror to GitHub, you can configure Phabricator to automatically maintain the
+For example, if you have a repository hosted in Phorge that you want to
+mirror to GitHub, you can configure Phorge to automatically maintain the
mirror. This is how the upstream repositories are set up.
The mirror copy must be read-only for users because any writes made to the
-mirror will be undone when Phabricator updates it. The mirroring process copies
+mirror will be undone when Phorge updates it. The mirroring process copies
the entire repository state exactly, so the remote state will be completely
replaced with an exact copy of the source repository. This may remove or
destroy information. Normally, you should only mirror to an empty repository.
-You can mirror any repository, even if Phabricator is only observing it and not
+You can mirror any repository, even if Phorge is only observing it and not
hosting it directly.
To begin mirroring a repository:
- Create a hosted or observed repository by following the relevant
instructions above.
- Add the remote URI you want to mirror to as a repository URI.
- Set the **I/O Type** for the URI to **Mirror**.
- If necessary, configure a credential.
To stop mirroring:
- Disable the mirror URI; or
- Change the **I/O Type** for the URI to **None**.
Import a Repository
===================
If you have an existing repository that you want to move so it is hosted on
-Phabricator, there are three ways to do it:
+Phorge, there are three ways to do it:
**Observe First**: //(Git, Mercurial)// Observe the existing repository first,
-according to the instructions above. Once Phabricator's copy of the repository
+according to the instructions above. Once Phorge's copy of the repository
is fully synchronized, change the **I/O Type** for the **Observe** URI to
**None** to stop fetching changes from the remote.
-By default, this will automatically make Phabricator's copy of the repository
+By default, this will automatically make Phorge's copy of the repository
writable, and you can begin pushing to it. If you've adjusted URI
configuration away from the defaults, you may need to set at least one URI
to **Read/Write** mode so you can push to it.
**Push Everything**: //(Git, Mercurial, Subversion)// Create a new empty hosted
repository according to the instructions above. Once the empty repository
initializes, push your entire existing repository to it.
In Subversion, you can do this with the `svnsync` tool.
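In Git, one way to push everything is a mirror clone followed by a mirror push.
This is a sketch with hypothetical URIs; use the clone URI shown for your new
hosted repository:
  $ git clone --mirror https://old-host.example.com/project.git
  $ cd project.git
  $ git push --mirror ssh://git@phorge.example.com/source/project.git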
**Copy on Disk**: //(Git, Mercurial, Subversion)// Create a new empty hosted
repository according to the instructions above, but do not activate it yet.
Using the **Storage** tab, find the location of the repository's working copy
on disk, and place a working copy of the repository you wish to import there.
For Git and Mercurial, use a bare working copy for best results.
This is the only way to import a Subversion repository because only the master
copy of the repository has history.
Once you've put a working copy in the right place on disk, activate the
repository.
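For example, for Git you might place a bare clone at the location reported by
the **Storage** tab. The path and source URI below are hypothetical, and the
resulting directory should be owned by the user the daemons run as:
  $ git clone --bare https://old-host.example.com/project.git /var/repo/42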
Builtin Clone URIs
==================
-By default, Phabricator automatically exposes and activates HTTP, HTTPS and
+By default, Phorge automatically exposes and activates HTTP, HTTPS and
SSH clone URIs by examining configuration.
**HTTP**: The `http://` clone URI will be available if these conditions are
satisfied:
- `diffusion.allow-http-auth` must be enabled or the repository view policy
must be "Public".
- The repository must be a Git or Mercurial repository.
- `security.require-https` must be disabled.
**HTTPS**: The `https://` clone URI will be available if these conditions are
satisfied:
- `diffusion.allow-http-auth` must be enabled or the repository view policy
must be "Public".
- The repository must be a Git or Mercurial repository.
- The `phabricator.base-uri` protocol must be `https://`.
**SSH**: The `ssh://` or `svn+ssh://` clone URI will be available if these
conditions are satisfied:
- `phd.user` must be configured.
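If a clone URI you expect is missing, you can inspect or adjust the relevant
options from the CLI as well as from the web UI. A minimal sketch, using the
HTTP conditions above as an example:
  phorge/ $ ./bin/config get diffusion.allow-http-auth
  phorge/ $ ./bin/config set diffusion.allow-http-auth true
  phorge/ $ ./bin/config get security.require-https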
Customizing Displayed Clone URIs
================================
If you have an unusual configuration and want the UI to offer users specific
-clone URIs other than the URIs that Phabricator serves or interacts with, you
+clone URIs other than the URIs that Phorge serves or interacts with, you
can add those URIs with the **I/O Type** set to **None** and then set their
**Display Type** to **Always**.
Likewise, you can set the **Display Type** of any URIs you do //not// want
to be visible to **Never**.
This allows you to precisely configure which clone URIs are shown to users for
a repository.
Reference: I/O Types
====================
This section details the available **I/O Type** options for URIs.
-Each repository has some **builtin** URIs. These are URIs hosted by Phabricator
+Each repository has some **builtin** URIs. These are URIs hosted by Phorge
itself. The modes available for each URI depend primarily on whether it is a
builtin URI or not.
-**Default**: This setting has Phabricator guess the correct option for the
+**Default**: This setting has Phorge guess the correct option for the
URI.
For **builtin** URIs, the default behavior is //Read/Write// if the repository
is hosted, and //Read-Only// if the repository is observed.
For custom URIs, the default type is //None// because we can not automatically
guess if you want to ignore, observe, or mirror a URI, and //None// is the
safest default.
-**Observe**: Phabricator will observe this repository and regularly fetch any
+**Observe**: Phorge will observe this repository and regularly fetch any
changes made to it to a local read-only copy.
You can not observe builtin URIs because reading a repository from itself
does not make sense.
You can not add a URI in Observe mode if an existing builtin URI is in
//Read/Write// mode, because this would mean the repository had two different
authorities: the observed remote copy and the hosted local copy. Take the
other URI out of //Read/Write// mode first.
WARNING: If you observe a remote repository, the entire state of the working
-copy that Phabricator maintains will be deleted and replaced with the state of
-the remote. If some changes are present only in Phabricator's working copy,
+copy that Phorge maintains will be deleted and replaced with the state of
+the remote. If some changes are present only in Phorge's working copy,
they will be unrecoverably destroyed.
-**Mirror**: Phabricator will push any changes made to this repository to the
+**Mirror**: Phorge will push any changes made to this repository to the
remote URI, keeping a read-only mirror hosted at that URI up to date.
This works for both observed and hosted repositories.
This option is not available for builtin URIs because it does not make sense
to mirror a repository to itself.
It is possible to mirror a repository to another repository that is also
-hosted by Phabricator by adding that other repository's URI, although this is
+hosted by Phorge by adding that other repository's URI, although this is
silly and probably very rarely of any use.
WARNING: If you mirror to a remote repository, the entire state of that remote
-will be replaced with the state of the working copy Phabricator maintains. If
+will be replaced with the state of the working copy Phorge maintains. If
some changes are present only in the remote, they will be unrecoverably
destroyed.
-**None**: Phabricator will not fetch changes from or push changes to this URI.
+**None**: Phorge will not fetch changes from or push changes to this URI.
For builtin URIs, it will not let users fetch changes from or push changes to
this URI.
You can use this mode to turn off an Observe URI after an import, stop a Mirror
URI from updating, or to add URIs that you're only using to customize which
-clone URIs are displayed to the user but don't want Phabricator to interact
+clone URIs are displayed to the user but don't want Phorge to interact
with directly.
-**Read Only**: Phabricator will serve the repository from this URI in read-only
+**Read Only**: Phorge will serve the repository from this URI in read-only
mode. Users will be able to fetch from it but will not be able to push to it.
-Because Phabricator must be able to serve the repository from URIs configured
+Because Phorge must be able to serve the repository from URIs configured
in this mode, this option is only available for builtin URIs.
-**Read/Write**: Phabricator will serve the repository from this URI in
+**Read/Write**: Phorge will serve the repository from this URI in
read/write mode. Users will be able to fetch from it and push to it.
URIs can not be set into this mode if another URI is set to //Observe// mode,
because that would mean the repository had two different authorities: the
observed remote copy and the hosted local copy. Take the other URI out of
//Observe// mode first.
-Because Phabricator must be able to serve the repository from URIs configured
+Because Phorge must be able to serve the repository from URIs configured
in this mode, this option is only available for builtin URIs.
Reference: Display Types
========================
This section details the available **Display Type** options for URIs.
-**Default**: Phabricator will guess the correct option for the URI. It
+**Default**: Phorge will guess the correct option for the URI. It
guesses based on the configured **I/O Type** and whether the URI is
**builtin** or not.
For //Observe//, //Mirror// and //None// URIs, the default is //Never//.
For builtin URIs in //Read Only// or //Read/Write// mode, the most
human-readable URI defaults to //Always// and the others default to //Never//.
**Always**: This URI will be shown to users as a clone/checkout URI. You can
add URIs in this mode to customize exactly what users are shown.
**Never**: This URI will not be shown to users. You can hide less-preferred
URIs to guide users to the URIs they should be using to interact with the
repository.
Next Steps
==========
Continue by:
- - configuring Phabricator to host repositories with
+ - configuring Phorge to host repositories with
@{article:Diffusion User Guide: Repository Hosting}.
diff --git a/src/docs/user/userguide/diviner.diviner b/src/docs/user/userguide/diviner.diviner
index 01484be14c..1c0b6f4f78 100644
--- a/src/docs/user/userguide/diviner.diviner
+++ b/src/docs/user/userguide/diviner.diviner
@@ -1,95 +1,95 @@
@title Diviner User Guide
@group userguide
Using Diviner, a documentation generator.
Overview
========
Diviner is an application for creating technical documentation.
-This article is maintained in a text file in the Phabricator repository and
+This article is maintained in a text file in the Phorge repository and
generated into the display document you are currently reading using Diviner.
Beyond generating articles, Diviner can also analyze source code and generate
documentation about classes, methods, and other primitives.
Generating Documentation
========================
To generate documentation, run:
- phabricator/ $ ./bin/diviner generate --book <book>
+ phorge/ $ ./bin/diviner generate --book <book>
Diviner ".book" Files
=====================
Diviner documentation books are configured using JSON `.book` files, which
look like this:
name=example.book
{
"name" : "example",
"title" : "Example Documentation",
"short" : "Example Docs",
"root" : ".",
"uri.source" : "http://example.com/diffusion/X/browse/master/%f$%l",
"rules" : {
"(\\.diviner$)" : "DivinerArticleAtomizer"
},
"exclude" : [
"(^externals/)",
"(^scripts/)",
"(^support/)"
],
"groups" : {
"forward" : {
"name" : "Doing Stuff"
},
"reverse" : {
"name" : "Undoing Stuff"
}
}
}
The properties in this file are:
- `name`: Required. Short, unique name to identify the documentation book.
This will be used in URIs, so it should not have special characters. Good
names are things like `"example"` or `"libcabin"`.
- `root`: Required. The root directory (relative to the `.book` file) which
documentation should be generated from. Often this will be a value like
`"../../"`, to specify the project root (for example, if the `.book` file
is in `project/src/docs/example.book`, the value `"../../"` would generate
documentation from the `project/` directory).
- `title`: Optional. Full human-readable title of the documentation book. This
is used when there's plenty of display space and should completely describe
the book. Good titles are things like `"Example Documentation"`, or
`"libcabin Developer Documentation"`.
- `short`: Optional. Shorter version of the title for use when display space
is limited (for example, in navigation breadcrumbs). If omitted, the full
title is used. Good short titles are things like `"Example Docs"` or
`"libcabin Dev Docs"`.
- `uri.source`: Optional. Diviner can link from the documentation to a
repository browser so that you can quickly jump to the definition of a class
or function. To do this, it uses a URI pattern which you specify here.
Normally, this URI should point at a repository browser like Diffusion.
For example, `"http://repobrowser.yourcompany.com/%f#%l"`. You can use these
conversions in the URI, which will be replaced at runtime:
- `%f`: Replaced with the name of the file.
- `%l`: Replaced with the line number.
- `%%`: Replaced with a literal `%` symbol.
- `rules`: Optional. A map of regular expressions to Atomizer classes which
controls which documentation generator runs on each file. If omitted,
Diviner will use its default ruleset. For example, adding the key
`"(\\.diviner$)"` to the map with value `"DivinerArticleAtomizer"` tells
Diviner to analyze any file with a name ending in `.diviner` using the
"article" atomizer.
- `exclude`: Optional. A list of regular expressions matching paths which
will be excluded from documentation generation for this book. For example,
adding a pattern like `"(^externals/)"` or `"(^vendor/)"` will make Diviner
ignore those directories.
- `groups`: Optional. Describes top level organizational groups which atoms
should be placed into.
diff --git a/src/docs/user/userguide/drydock.diviner b/src/docs/user/userguide/drydock.diviner
index 0d43f7f3f0..52ef2d74af 100644
--- a/src/docs/user/userguide/drydock.diviner
+++ b/src/docs/user/userguide/drydock.diviner
@@ -1,260 +1,260 @@
@title Drydock User Guide
@group userguide
Drydock, a software and hardware resource manager.
Overview
========
WARNING: Drydock is very new and has many sharp edges. Prepare yourself for
a challenging adventure in unmapped territory, not a streamlined experience
where things work properly or make sense.
Drydock is an infrastructure application that primarily helps other
applications coordinate during complex build and deployment tasks. Typically,
you will configure Drydock to enable capabilities in other applications:
- Harbormaster can use Drydock to host builds.
- Differential can use Drydock to perform server-side merges.
Users will not normally interact with Drydock directly.
If you want to get started with Drydock right away, see
@{article:Drydock User Guide: Quick Start} for specific instructions on
configuring integrations.
What Drydock Does
=================
Drydock manages working copies, hosts, and other software and hardware
resources that build and deployment processes may require in order to perform
useful work.
Many useful processes need a working copy of a repository (or some similar sort
of resource) so they can read files, perform version control operations, or
execute code.
For example, you might want to be able to automatically run unit tests, build a
binary, or generate documentation every time a new commit is pushed. Or you
might want to automatically merge a revision or cherry-pick a commit from a
development branch to a release branch. Any of these tasks need a working copy
of the repository before they can get underway.
These processes could just clone a new working copy when they started and
delete it when they finished. This works reasonably well at a small scale, but
will eventually hit limitations if you want to do things like: expand the build
tier to multiple machines; or automatically scale the tier up and down based on
usage; or reuse working copies to improve performance; or make sure things get
cleaned up after a process fails; or have jobs wait if the tier is too busy.
Solving these problems effectively requires coordination between the processes
doing the actual work.
Drydock solves these scaling problems by providing a central allocation
framework for //resources//, which are physical or virtual resources like a
host or a working copy. Processes which need to share hardware or software can
use Drydock to coordinate creation, access, and destruction of those resources.
Applications ask Drydock for resources matching a description, and it allocates
a corresponding resource by either finding a suitable unused resource or
creating a new resource. When work completes, the resource is returned to the
resource pool or destroyed.
Getting Started with Drydock
============================
In general, you will interact with Drydock by configuring blueprints, which
tell Drydock how to build resources. You can jump into this topic directly
in @{article:Drydock Blueprints}.
For help on configuring specific application features:
- to configure server-side merges from Differential, see
@{article:Differential User Guide: Automated Landing}.
You should also understand the Drydock security model before deploying it
in a production environment. See @{article:Drydock User Guide: Security}.
The remainder of this document has some additional high-level discussion about
how Drydock works and why it works that way, which may be helpful in
understanding the application as a whole.
Drydock Concepts
================
The major concepts in Drydock are **Blueprints**, **Resources**, **Leases**,
and the **Allocator**.
**Blueprints** are configuration that tells Drydock how to create resources:
where it can put them, how to access them, how many it can make at once, who is
allowed to ask for access to them, how to actually build them, how to clean
them up when they are no longer in use, and so on.
Drydock starts without any blueprints. You'll add blueprints to configure
Drydock and enable it to satisfy requests for resources. You can learn more
about blueprints in @{article:Drydock Blueprints}.
**Resources** represent things (like hosts or working copies) that Drydock has
created, is managing the lifecycle for, and can give other applications access
to.
**Leases** are requests for resources with certain qualities by other
applications. For example, Harbormaster may request a working copy of a
particular repository so it can run unit tests.
The **Allocator** is where Drydock actually does work. It works roughly like
this:
- An application creates a lease describing a resource it needs, and
uses this lease to ask Drydock for an appropriate resource.
- Drydock looks at free resources to try to find one it can use to satisfy
the request. If it finds one, it marks the resource as in use and gives
the application details about how to access it.
- If it can't find an appropriate resource that already exists, it looks at
the blueprints it has configured to try to build one. If it can, it creates
a new resource, then gives the application access to it.
- Once the application finishes using the resource, it frees it. Depending
on configuration, Drydock may reuse it, destroy it, or hold onto it and
make a decision later.
Some minor concepts in Drydock are **Slot Locks** and **Repository Operations**.
**Slot Locks** are simple optimistic locks that most Drydock blueprints use to
avoid race conditions. Their design is not particularly interesting or novel;
they're just a fairly good fit for most of the locking problems that Drydock
blueprints tend to encounter, and Drydock provides APIs to make them easy to
work with.
**Repository Operations** help other applications coordinate writes to
repositories. Multiple applications perform similar kinds of writes, and these
writes require more sequencing/coordination and user feedback than other
operations.
Architecture Overview
=====================
This section describes some of Drydock's design goals and architectural
choices, so you can understand its strengths and weaknesses and which problem
domains it is well or poorly suited for.
A typical use case for Drydock is giving another application access to a
working copy in order to run a build or unit test operation. Drydock can
satisfy the request and resume execution of application code in 1-2 seconds
under reasonable conditions and with moderate tradeoffs, and can satisfy a
large number of these requests in parallel.
**Scalable**: Drydock is designed to scale easily to something in the realm of
thousands of hosts in hundreds of pools, and far beyond that with a little
work.
Drydock is intended to solve resource management problems at very large scales
and minimizes blocking operations, locks, and artificial sequencing. Drydock is
designed to fully utilize an almost arbitrarily large pool of resources and
improve performance roughly linearly with available hardware.
Because the application assumes that deployment at this scale and complexity
level is typical, you may need to configure more things and do more work than
you would under the simplifying assumptions of small scale.
**Heavy Resources**: Drydock assumes that resources are relatively
heavyweight and require a meaningful amount (a second or more) of work to
build, maintain and tear down. It also assumes that leases will often have
substantial lifespans (seconds or minutes) while performing operations.
Resources like working copies (which typically take several seconds to create
with a command like `git clone`) and VMs (which typically take several seconds
to spin up) are good fits for Drydock and for the problems it is intended to
solve.
Lease operations like running unit tests, performing builds, executing merges,
generating documentation and running temporary services (which typically last
at least a few seconds) are also good fits for Drydock.
In both cases, the general concern with lightweight resources and operations is
that Drydock operation overhead is roughly on the order of a second for many
tasks, so overhead from Drydock will be substantial if resources are built and
torn down in a few milliseconds or lease operations require only a fraction of
a second to execute.
As a rule of thumb, Drydock may be a poor fit for a problem if operations
typically take less than a second to build, execute, and destroy.
**Focus on Resource Construction**: Drydock is primarily solving a resource
construction problem: something needs a resource matching some description, so
Drydock finds or builds that resource as quickly as possible.
Drydock generally prioritizes responding to requests quickly over other
concerns, like minimizing waste or performing complex scheduling. Although you
can make adjustments to some of these behaviors, it generally assumes that
resources are cheap compared to the cost of waiting for resource construction.
This isn't to say that Drydock is grossly wasteful or has a terrible scheduler,
just that efficient utilization and efficient scheduling aren't the primary
problems the design focuses on.
This prioritization corresponds to scenarios where resources are something like
hosts or working copies, and operations are something like builds, and the cost
of hosts and storage is small compared to the cost of engineer time spent
waiting on jobs to get scheduled.
Drydock may be a weak fit for a problem if it is bounded by resource
availability and using resources as efficiently as possible is very important.
Drydock generally assumes you will respond to a resource deficit by making more
resources available (usually very cheap), rather than by paying engineers to
wait for operations to complete (usually very expensive).
**Isolation Tradeoffs**: Drydock assumes that multiple operations running at
similar levels of trust may be interested in reducing isolation to improve
performance, reduce complexity, or satisfy some other similar goal. It does not
guarantee isolation and assumes most operations will not run in total isolation.
If this isn't true for your use case, you'll need to be careful in configuring
Drydock to make sure that operations are fully isolated and can not interact.
Complete isolation will reduce the performance of the allocator as it will
generally prevent it from reusing resources, which is one of the major ways it
can improve performance.
You can find more discussion of these tradeoffs in
@{article:Drydock User Guide: Security}.
**Agentless**: Drydock does not require an agent or daemon to be installed on
hosts. It interacts with hosts over SSH.
**Very Abstract**: Drydock's design is //extremely// abstract. Resources have
very little hardcoded behavior. The allocator has essentially zero specialized
knowledge about what it is actually doing.
One aspect of this abstractness is that Drydock is composable, and solves
complex allocation problems by //asking itself// to build the pieces it needs.
To build a working copy, Drydock first asks itself for a suitable host. It
solves this allocation sub-problem, then resolves the original request.
This allows new types of resources to build on Drydock's existing knowledge of
resource construction by just saying "build one of these other things you
already know how to build, then apply a few adjustments". This also means that
you can tell Drydock about a new way to build hosts (say, bring up VMs from a
different service provider) and the rest of the pipeline can use these new
hosts interchangeably with the old hosts.
While this design theoretically makes Drydock more powerful and more flexible
than a less abstract approach, abstraction is frequently a double-edged sword.
Drydock is almost certainly at the extreme upper end of abstraction for tools
in this space, and the level of abstraction may ultimately match poorly with a
particular problem domain. Alternative approaches may give you more specialized
and useful tools for approaching a given problem.
Next Steps
==========
Continue by:
- understanding Drydock security concerns with
@{article:Drydock User Guide: Security}; or
- learning about blueprints in @{article:Drydock Blueprints}; or
- - allowing Phabricator to write to repositories with
+ - allowing Phorge to write to repositories with
@{article:Drydock User Guide: Repository Automation}.
diff --git a/src/docs/user/userguide/drydock_hosts.diviner b/src/docs/user/userguide/drydock_hosts.diviner
index 1b8f22cce1..eb506a9c0a 100644
--- a/src/docs/user/userguide/drydock_hosts.diviner
+++ b/src/docs/user/userguide/drydock_hosts.diviner
@@ -1,126 +1,126 @@
@title Drydock Blueprints: Hosts
@group userguide
Guide to configuring Drydock host blueprints.
Overview
========
IMPORTANT: Drydock is not a mature application and may be difficult to
configure and use for now.
To give Drydock access to machines so it can perform work, you'll configure
**host blueprints**. These blueprints tell Drydock where to find machines (or
how to build machines) and how to connect to them.
Once Drydock has access to hosts it can use them to build more interesting and
complex types of resources, like repository working copies.
Drydock currently supports these kinds of host blueprints:
- **Almanac Hosts**: Gives Drydock access to a predefined list of hosts.
Drydock may support additional blueprints in the future.
Security
========
Drydock can be used to run semi-trusted and untrusted code, and you may want
to isolate specific processes or classes of processes from one another. See
@{article:Drydock User Guide: Security} for discussion of security
concerns and guidance on how to make isolation tradeoffs.
General Considerations
======================
**You must install software on hosts.** Drydock does not currently handle
installing software on hosts. You'll need to make sure any hosts are configured
properly with any software you need, and have tools like `git`, `hg` or `svn`
that may be required to interact with working copies.
-You do **not** need to install PHP, arcanist, or Phabricator on the
+You do **not** need to install PHP, Arcanist, or Phorge on the
hosts unless you are specifically running `arc` commands.
**You must configure authentication.** Drydock also does not handle credentials
for VCS operations. If you're interacting with repositories hosted on
-Phabricator, the simplest way to set this up is something like this:
+Phorge, the simplest way to set this up is something like this:
- - Create a new bot user in Phabricator.
+ - Create a new bot user in Phorge.
- In {nav Settings > SSH Public Keys}, add a public key or generate a
keypair.
- Put the private key on your build hosts as `~/.ssh/id_rsa` for whatever
user you're connecting with.
-This will let processes on the host access Phabricator as the bot user, and
+This will let processes on the host access Phorge as the bot user, and
use the bot user's permissions to pull and push changes.
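For example, you might generate a dedicated keypair, upload the public half in
{nav Settings > SSH Public Keys} for the bot, and install the private half on
each build host. The host and account names below are hypothetical:
  $ ssh-keygen -t rsa -N '' -f drydock-bot -C 'drydock-bot'
  # assumes ~/.ssh already exists for the builder account on the build host
  $ scp drydock-bot builder@build001.mycompany.com:.ssh/id_rsa
  $ ssh builder@build001.mycompany.com 'chmod 600 .ssh/id_rsa'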
If you're using hosted repositories from an external service, you can follow
similar steps for that service.
Note that any processes running under the given user account will have access
to the private key, so you should give the bot the smallest acceptable level of
permissions if you're running semi-trusted or untrusted code like unit tests.
**You must create a `/var/drydock` directory.** This is hard-coded in Drydock
for now, so you need to create it on the hosts. This can be a symlink to
a different location if you prefer.
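For example, on each build host (the `builder` account name is hypothetical;
use whichever user Drydock connects as):
  $ sudo mkdir -p /var/drydock
  $ sudo chown builder /var/drydock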
Almanac Hosts
=============
The **Almanac Hosts** blueprint type gives Drydock access to a predefined list
of hosts which you configure in the Almanac application. This is the simplest
type of blueprint to set up.
For more information about Almanac, see @{article:Almanac User Guide}.
For example, suppose you have `build001.mycompany.com` and
`build002.mycompany.com`, and want to configure Drydock to be able to use these
hosts. To do this:
**Create Almanac Devices**: Create a device record in Almanac for each of your
hosts.
{nav Almanac > Devices > Create Device}
Enter the device names (like `build001.mycompany.com`). After creating the
devices, use {nav Add Interface} to configure the ports and IP addresses that
Drydock should connect to over SSH (normally, this is port `22`).
**Create an Almanac Service**: In the Almanac application, create a new service
to define the pool of devices you want to use.
{nav Almanac > Services > Create Service}
Choose the service type **Drydock: Resource Pool**. This will allow Drydock
to use the devices that are bound to the service.
Now, use {nav Add Binding} to bind all of the devices to the service.
You can add more hosts to the pool later by binding additional devices, and
Drydock will automatically start using them. Likewise, you can remove bindings
to take hosts out of service.
**Create a Drydock Blueprint**: Now, create a new blueprint in Drydock.
{nav Drydock > Blueprints > New Blueprint}
Choose the **Almanac Hosts** blueprint type.
In **Almanac Services**, select the service you previously created. For
**Credentials**, select an SSH private key you want Drydock to use to connect
to the hosts.
Drydock should now be able to build resources from these hosts.
Next Steps
==========
Continue by:
- returning to @{article:Drydock Blueprints}.
diff --git a/src/docs/user/userguide/drydock_quick_start.diviner b/src/docs/user/userguide/drydock_quick_start.diviner
index 4b0bd7110d..977b07a482 100644
--- a/src/docs/user/userguide/drydock_quick_start.diviner
+++ b/src/docs/user/userguide/drydock_quick_start.diviner
@@ -1,74 +1,74 @@
@title Drydock User Guide: Quick Start
@group userguide
Guide to getting Drydock up and running quickly.
Quick Start: Land Revisions
===========================
Quick start guide to getting "Land Revision" working in Differential. For
a more detailed guide, see @{article:Drydock User Guide: Repository Automation}.
Choose a repository you want to enable "Land Revision" for. We'll call this
**Repository X**.
You need to configure a staging area for this repository if you haven't
already. You can do this in Diffusion in {nav Edit Repository > Edit Staging}.
We'll call this **Staging Area Y**.
Choose or create a host you want to run merges on. We'll call this
`automation001`. For example, you might bring up a new host in EC2 and
label it `automation001.mycompany.com`. You can use an existing host if you
prefer.
Create a user account on the host, or choose an existing user account. This is
the user that merges will execute under: Drydock will connect to it and run a
bunch of `git` commands, then ultimately run `git push`. We'll call this user
`builder`.
Install `git`, `hg` or `svn` if you haven't already and set up private keys
for `builder` so it can pull and push any repositories you want to operate
on.
-If your repository and/or staging area are hosted in Phabricator, you may want
+If your repository and/or staging area are hosted in Phorge, you may want
to create a corresponding bot account so you can add keys and give it
permissions.
At this point you should be able to `ssh builder@automation001` to connect to
the host, and get a normal shell. You should be able to `git clone ...` from
**Repository X** and from **Staging Area Y**, and `git push` to **Repository
X**. If you can't, configure things so you can.
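A quick way to sanity-check this is to connect and clone manually. The URIs
below are hypothetical; pushing a throwaway branch to **Repository X**
afterwards will confirm write access:
  $ ssh builder@automation001
  builder@automation001 $ git clone ssh://git@phorge.example.com/source/repository-x.git
  builder@automation001 $ git clone ssh://git@phorge.example.com/source/staging-y.git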
Now, create a host blueprint for the host. You can find a more detailed
walkthrough in @{article:Drydock Blueprints: Hosts}. Briefly:
- Create an Almanac device for the host. This should have the IP address and
port for your host.
- Create an Almanac service bound to the device. This should be a Drydock
resource pool service and have a binding to the IP from the previous step.
- Create a Drydock host blueprint which uses the service from the previous
step. It should be configured with an SSH private key that can be used
to connect to `builder@automation001`.
Then, create a new working copy blueprint which uses the host blueprint you
just made. You can find a more detailed walkthrough in @{article:Drydock
Blueprints: Working Copies}. Authorize the working copy blueprint to use the
host blueprint.
Finally, configure repository automation for **Repository X**:
{nav Edit Repository > Edit Automation}. Provide the working copy blueprint
from the previous step. Authorize the repository to use the working copy
blueprint.
After you save changes, click {nav Test Configuration} to test that things
are working properly.
The "Land Revision" action should now be available on revisions for this
repository.
Next Steps
==========
Continue by:
- returning to @{article:Drydock User Guide}.
diff --git a/src/docs/user/userguide/drydock_repository_automation.diviner b/src/docs/user/userguide/drydock_repository_automation.diviner
index 35ef932cc2..25b6343e24 100644
--- a/src/docs/user/userguide/drydock_repository_automation.diviner
+++ b/src/docs/user/userguide/drydock_repository_automation.diviner
@@ -1,85 +1,85 @@
@title Drydock User Guide: Repository Automation
@group userguide
-Configuring repository automation so Phabricator can push commits.
+Configuring repository automation so Phorge can push commits.
Overview
========
IMPORTANT: This feature is very new and some of the capabilities described
in this document are not yet available. This feature as a whole is a prototype.
By configuring Drydock and Diffusion appropriately, you can enable **Repository
-Automation** for a repository. This will allow Phabricator to make changes
+Automation** for a repository. This will allow Phorge to make changes
to the repository.
Limitations
===========
- This feature is a prototype.
- Only Git is supported.
Security
========
-Configuring repository automation amounts to telling Phabricator where it
+Configuring repository automation amounts to telling Phorge where it
should perform working copy operations (like merges, cherry-picks and pushes)
when doing writes.
Depending on how stringent you are about change control, you may want to
make sure these processes are isolated and can not be tampered with. If you
run tests and automation on the same hardware, tests may be able to interfere
with automation. You can read more about this in
@{article:Drydock User Guide: Security}.
Configuring Automation
======================
To configure automation, use {nav Edit Repository > Edit Automation} from
Diffusion.
On the configuration screen, specify one or more working copy blueprints in
Drydock (usually, you'll just use one). Repository automation will use working
copies built by these blueprints to perform merges and push changes.
For more details on configuring these blueprints, see
@{article:Drydock Blueprints: Working Copies}.
After selecting one or more blueprints, make sure you authorize the repository
to use them. Automation operations won't be able to proceed until you do. The
UI will remind you if you have unauthorized blueprints selected.
Testing Configuration
=====================
Once the blueprints are configured and authorized, use {nav Test Configuration}
to check that things are configured correctly. This will build a working copy
in Drydock, connect to it, and run a trivial command (like `git status`) to
make sure things work.
If it's the first time you're doing this, it may take a few moments since it
will need to clone a fresh working copy.
If the test is successful, your configuration is generally in good shape. If
not, it should give you more details about what went wrong.
Since the test doesn't actually do a push, it's possible that you may have
everything configured properly //except// write access. In this case, you'll
run into a permission error when you try to actually perform a merge or other
similar write. If you do, adjust permissions or credentials appropriately so
the working copy can be pushed from.
Next Steps
==========
Continue by:
- understanding Drydock security concerns with
@{article:Drydock User Guide: Security}; or
- returning to the @{article:Drydock User Guide}.
diff --git a/src/docs/user/userguide/drydock_security.diviner b/src/docs/user/userguide/drydock_security.diviner
index 9a212437c7..f7df777ef9 100644
--- a/src/docs/user/userguide/drydock_security.diviner
+++ b/src/docs/user/userguide/drydock_security.diviner
@@ -1,209 +1,209 @@
@title Drydock User Guide: Security
@group userguide
Understanding security concerns in Drydock.
Overview
========
Different applications use Drydock for different things, and some of the things
they do with Drydock require different levels of trust and access. It is
important to configure Drydock properly so that less trusted code can't do
anything you aren't comfortable with.
For example, running unit tests on Drydock normally involves running relatively
untrusted code (it often has a single author and has not yet been reviewed)
that needs very few capabilities (generally, it only needs to be able to report
-results back to Phabricator). In contrast, automating merge requests on Drydock
+results back to Phorge). In contrast, automating merge requests on Drydock
involves running trusted code that needs more access (it must be able to write
to repositories).
Drydock allows resources to be shared and reused, so it's possible to configure
Drydock in a way that gives untrusted code a lot of access by accident.
One way Drydock makes allocations faster is by sharing, reusing, and recycling
resources. When an application asks Drydock for a working copy, it will try to
satisfy the request by cleaning up a suitable existing working copy if it can,
instead of building a new one. This is faster, but it means that tasks have
some ability to interact or interfere with each other.
Similarly, Drydock may allocate multiple leases on the same host at the same
time, running as the same user. This is generally simpler to configure and less
wasteful than fully isolating leases, but means that they can interact.
Depending on your organization, environment and use cases, you might not want
this, and it may be important that different use cases are unable to interfere
with each other. For example, you might want to prevent unit tests from writing
to repositories.
**Drydock does not guarantee that resources are isolated by default**. When
resources are more isolated, they are usually also harder to configure and
slower to allocate. Because most installs will want to find a balance between
isolation and complexity/performance, Drydock does not make assumptions about
either isolation or performance having absolute priority.
You'll usually want to isolate things just enough that nothing bad can happen.
Fortunately, this is straightforward. This document describes how to make sure
you have enough isolation so that nothing you're uncomfortable with can occur.
Choosing an Isolation Policy
============================
This section provides some reasonable examples of ways you might approach
configuring Drydock.
| Isolation | Suitable For | Description
|-----------|-----|-------
| Zero | Development | Everything on one host.
| Low | Small Installs | Use a dedicated Drydock host.
| High | Most Installs | **Recommended**. Use low-trust and high-trust pools.
| Custom | Special Requirements | Use multiple pools.
| Absolute | Special Requirements | Completely isolate all resources.
-**Zero Isolation**: Run Drydock operations on the same host that Phabricator
-runs on. This is only suitable for developing or testing Phabricator. Any
-Drydock operation can potentially compromise Phabricator. It is intentionally
+**Zero Isolation**: Run Drydock operations on the same host that Phorge
+runs on. This is only suitable for developing or testing Phorge. Any
+Drydock operation can potentially compromise Phorge. It is intentionally
difficult to configure Drydock to operate in this mode. Running Drydock
-operations on the Phabricator host is strongly discouraged.
+operations on the Phorge host is strongly discouraged.
**Low Isolation**: Designate a separate Drydock host and run Drydock
operations on it. This is suitable for small installs and provides a reasonable
level of isolation. However, it will allow unit tests (which often run
lower-trust code) to interfere with repository automation operations.
**High Isolation**: Designate two Drydock host pools and run low-trust
operations (like builds) on one pool and high-trust operations (like repository
automation) on a separate pool. This provides a good balance between isolation
and performance, although tests can still potentially interfere with the
execution of unrelated tests.
**Custom Isolation**: You can continue adding pools to refine the resource
isolation model. For example, you may have higher-trust and lower-trust
repositories or do builds on a mid-trust tier which runs only reviewed code.
**Absolute Isolation**: Configure blueprints to completely initialize and
destroy hosts or containers on every request, and limit all resources to one
simultaneous lease. This will completely isolate every operation, but come at
a high performance and complexity cost.
NOTE: It is not currently possible to configure Drydock in an absolute
isolation mode.
It is usually reasonable to choose one of these approaches as a starting point
and then adjust it to fit your requirements. You can also evolve your use of
Drydock over time as your needs change.
Threat Scenarios
================
This section will help you understand the threats to a Drydock environment.
Not all threats will be concerning to all installs, and you can choose an
approach which defuses only the threats you care about.
Attackers have three primary targets:
- capturing hosts;
- - compromising Phabricator; and
+ - compromising Phorge; and
- compromising the integrity of other Drydock processes.
**Attacks against hosts** are the least sophisticated. In this scenario, an
attacker wants to run a program like a Bitcoin miner or botnet client on
hardware that they aren't paying for or which can't be traced to them. They
write a "unit test" or which launches this software, then send a revision
-containing this "unit test" for review. If Phabricator is configured to
+containing this "unit test" for review. If Phorge is configured to
automatically run tests on new revisions, it may execute automatically and give
the attacker access to computing resources they did not previously control and
which can not easily be traced back to them.
This is usually only a meaningful threat for open source installs, because
there is a high probability of eventual detection and the value of these
resources is small, so employees will generally not have an incentive to
attempt this sort of attack. The easiest way to prevent this attack is to
prevent untrusted, anonymous contributors from running tests. For example,
create a "Trusted Contributors" project and only run tests if a revision author
is a member of the project.
-**Attacks against Phabricator** are more sophisticated. In this scenario, an
-attacker tries to compromise Phabricator itself (for example, to make themselves
+**Attacks against Phorge** are more sophisticated. In this scenario, an
+attacker tries to compromise Phorge itself (for example, to make themselves
an administrator or gain access to an administrator account).
-This is made possible if Drydock is running on the same host as Phabricator or
-runs on a privileged subnet with access to resources like Phabricator database
+This is made possible if Drydock is running on the same host as Phorge or
+runs on a privileged subnet with access to resources like Phorge database
hosts. Most installs should be concerned about this attack.
The best way to defuse this attack is to run Drydock processes on a separate
host which is not on a privileged subnet. For example, use a
`build.mycompany.com` host or pool for Drydock processes, separate from your
-`phabricator.mycompany.com` host or pool.
+`phorge.mycompany.com` host or pool.
Even if the host is not privileged, many Drydock processes have some level of
privilege (enabling them to clone repositories, or report test results back to
-Phabricator). Be aware that tests can hijack credentials they are run with,
+Phorge). Be aware that tests can hijack credentials they are run with,
and potentially hijack credentials given to other processes on the same hosts.
You should use credentials with a minimum set of privileges and assume all
processes on a host have the highest level of access that any process on the
host has.
**Attacks against Drydock** are the most sophisticated. In this scenario, an
attacker uses one Drydock process to compromise a different process: for
example, a unit test which tampers with a merge or injects code into a build.
This might allow an attacker to make changes to a repository or binary without
going through review or triggering other rules which would normally detect the
change.
These attackers could also make failing tests appear to pass, or break tests or
builds, but these attacks are generally less interesting than tampering with
a repository or binary.
This is a complex attack which you may not have to worry about unless you have
a high degree of process and control in your change pipeline. If users can push
changes directly to repositories, this often represents a faster and easier way
to achieve the same tampering.
The best way to defuse this attack is to prevent high-trust (repository
automation) processes from running on the same hosts as low-trust (unit test)
processes. For example, use an `automation.mycompany.com` host or pool for
repository automation, and a `build.mycompany.com` host or pool for tests.
Applying an Isolation Policy
============================
Designing a security and isolation policy for Drydock can take some thought,
but applying it is straightforward. Applications which want to use Drydock must
explicitly list which blueprints they are allowed to use, and they must be
approved to use them in Drydock. By default, nothing can do anything, which is
very safe and secure.
To get builds or automation running on a host, specify the host blueprint as a
usable blueprint in the build step or repository configuration. This creates a
new authorization request in Drydock which must be approved before things can
move forward.
Until the authorization is approved, the process can not use the blueprint to
create any resources, nor can it use resources previously created by the
blueprint.
You can review and approve requests from the blueprint detail view in Drydock:
find the request and click {nav Approve Authorization}. You can also revoke
approval at any time from this screen which will prevent the object from
continuing to use the blueprint (but note that this does not release any
existing leases).
Once the authorization request is approved, the build or automation process
should be able to run if everything else is configured properly.
Note that authorizations are transitive: if a build step is authorized to use
blueprint A, and blueprint A is authorized to use blueprint B, the build step
may indirectly operate on resources created by blueprint B. This should
normally be consistent with expectations.
Next Steps
==========
Continue by:
- returning to the @{article:Drydock User Guide}.
diff --git a/src/docs/user/userguide/events.diviner b/src/docs/user/userguide/events.diviner
index e18578288b..d004dad84f 100644
--- a/src/docs/user/userguide/events.diviner
+++ b/src/docs/user/userguide/events.diviner
@@ -1,218 +1,218 @@
@title Events User Guide: Installing Event Listeners
@group userguide
-Using Phabricator event listeners to customize behavior.
+Using Phorge event listeners to customize behavior.
= Overview =
(WARNING) The event system is an artifact of a bygone era. Use of the event
system is strongly discouraged. We have been removing events since 2013 and
will continue to remove events in the future.
-Phabricator and Arcanist allow you to install custom runtime event listeners
+Phorge and Arcanist allow you to install custom runtime event listeners
which can react to certain things happening (like a Maniphest Task being edited
or a user creating a new Differential Revision) and run custom code to perform
logging, synchronize with other systems, or modify workflows.
-These listeners are PHP classes which you install beside Phabricator or
-Arcanist, and which Phabricator loads at runtime and runs in-process. They
+These listeners are PHP classes which you install beside Phorge or
+Arcanist, and which Phorge loads at runtime and runs in-process. They
require somewhat more effort upfront than simple configuration switches, but are
the most direct and powerful way to respond to events.
-= Installing Event Listeners (Phabricator) =
+= Installing Event Listeners (Phorge) =
-To install event listeners in Phabricator, follow these steps:
+To install event listeners in Phorge, follow these steps:
- Write a listener class which extends @{class@arcanist:PhutilEventListener}.
- Add it to a libphutil library, or create a new library (for instructions,
- see @{article@phabcontrib:Adding New Classes}.
- - Configure Phabricator to load the library by adding it to `load-libraries`
- in the Phabricator config.
- - Configure Phabricator to install the event listener by adding the class
- name to `events.listeners` in the Phabricator config.
+ see @{article@contrib:Adding New Classes}).
+ - Configure Phorge to load the library by adding it to `load-libraries`
+ in the Phorge config.
+ - Configure Phorge to install the event listener by adding the class
+ name to `events.listeners` in the Phorge config.
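
As a rough sketch (the class name is a placeholder, and the event constant is
just the test event described later in this document), a minimal listener
might look like this:

```lang=php
final class MyExampleEventListener extends PhutilEventListener {

  public function register() {
    // Subscribe to the events this listener cares about. Real listeners
    // would normally listen for application events instead of the test
    // event used here.
    $this->listen(PhabricatorEventType::TYPE_TEST_DIDRUNTEST);
  }

  public function handleEvent(PhutilEvent $event) {
    // React to the event. This example just writes a line to the log.
    phlog('Handled event of type: '.$event->getType());
  }

}
```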
You can verify your listener is registered in the "Events" tab of DarkConsole.
It should appear at the top under "Registered Event Listeners". You can also
see any events the page emitted there. For details on DarkConsole, see
@{article:Using DarkConsole}.
= Installing Event Listeners (Arcanist) =
To install event listeners in Arcanist, follow these steps:
- Write a listener class which extends @{class@arcanist:PhutilEventListener}.
- Add it to a libphutil library, or create a new library (for instructions,
- see @{article@phabcontrib:Adding New Classes}.
- - Configure Phabricator to load the library by adding it to `load`
+ see @{article@contrib:Adding New Classes}).
+ - Configure Arcanist to load the library by adding it to `load`
in the Arcanist config (e.g., `.arcconfig`, or user/global config).
- Configure Arcanist to install the event listener by adding the class
name to `events.listeners` in the Arcanist config.
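
For example, an `.arcconfig` configured this way might look something like
this (the library path and class name are placeholders):

```lang=json
{
  "load": [
    "support/lib/"
  ],
  "events.listeners": [
    "MyExampleEventListener"
  ]
}
```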
You can verify your listener is registered by running any `arc` command with
`--trace`. You should see output indicating your class was registered as an
event listener.
= Example Listener =
-Phabricator includes an example event listener,
+Phorge includes an example event listener,
@{class:PhabricatorExampleEventListener}, which may be useful as a starting
point in developing your own listeners. This listener listens for a test
event that is emitted by the script `scripts/util/emit_test_event.php`.
If you run this script normally, it should output something like this:
$ ./scripts/util/emit_test_event.php
Emitting event...
Done.
This is because there are no listeners for the event, so nothing reacts to it
when it is emitted. You can add the example listener by either adding it to
your `events.listeners` configuration or with the `--listen` command-line flag:
$ ./scripts/util/emit_test_event.php --listen PhabricatorExampleEventListener
Installing 'PhabricatorExampleEventListener'...
Emitting event...
PhabricatorExampleEventListener got test event at 1341344566
Done.
This time, the listener was installed and had its callback invoked when the
test event was emitted.
= Available Events =
-You can find a list of all Phabricator events in @{class:PhabricatorEventType}.
+You can find a list of all Phorge events in @{class:PhabricatorEventType}.
== All Events ==
The special constant `PhutilEventType::TYPE_ALL` will let you listen for all
events. Normally, you want to listen only to specific events, but if you're
writing a generic handler you can listen to all events with this constant
rather than by enumerating each event.
== Arcanist Events ==
Arcanist event constants are listed in @{class@arcanist:ArcanistEventType}.
All Arcanist events have this data available:
- `workflow` The active @{class@arcanist:ArcanistWorkflow}.
== Arcanist: Commit: Will Commit SVN ==
The constant for this event is `ArcanistEventType::TYPE_COMMIT_WILLCOMMITSVN`.
This event is dispatched before an `svn commit` occurs and allows you to
modify the commit message. Data available on this event:
- `message` The text of the message.
== Arcanist: Diff: Will Build Message ==
The constant for this event is `ArcanistEventType::TYPE_DIFF_WILLBUILDMESSAGE`.
This event is dispatched before an editable message is presented to the user,
and allows you to, e.g., fill in default values for fields. Data available
on this event:
- `fields` A map of field values to be compiled into a message.
== Arcanist: Diff: Was Created ==
The constant for this event is `ArcanistEventType::TYPE_DIFF_WASCREATED`.
This event is dispatched after a diff is created. It is currently only useful
for collecting timing information. No data is available on this event.
== Arcanist: Revision: Will Create Revision ==
The constant for this event is
`ArcanistEventType::TYPE_REVISION_WILLCREATEREVISION`.
This event is dispatched before a revision is created. It allows you to modify
fields to, e.g., edit revision titles. Data available on this event:
- `specification` Parameters that will be used to invoke the
`differential.createrevision` Conduit call.
== Differential: Will Mark Generated ==
The constant for this event is
`PhabricatorEventType::TYPE_DIFFERENTIAL_WILLMARKGENERATED`.
This event is dispatched before Differential decides if a file is generated (and
doesn't need to be reviewed) or not. Data available on this event:
- `corpus` Body of the file.
- `is_generated` Boolean indicating if this file should be treated as
generated.
== Diffusion: Did Discover Commit ==
The constant for this event is
`PhabricatorEventType::TYPE_DIFFUSION_DIDDISCOVERCOMMIT`.
This event is dispatched when the daemons discover a commit for the first time.
This event happens very early in the pipeline, and not all commit information
will be available yet. Data available on this event:
- `commit` The @{class:PhabricatorRepositoryCommit} that was discovered.
- `repository` The @{class:PhabricatorRepository} the commit was discovered
in.
== Test: Did Run Test ==
The constant for this event is
`PhabricatorEventType::TYPE_TEST_DIDRUNTEST`.
This is a test event for testing event listeners. See above for details.
== UI: Did Render Actions ==
The constant for this event is
`PhabricatorEventType::TYPE_UI_DIDRENDERACTIONS`.
This event is dispatched after a @{class:PhabricatorActionListView} is built by
the UI. It allows you to add new actions that your application may provide, like
"Fax this Object". Data available on this event:
- `object` The object which actions are being rendered for.
- `actions` The current list of available actions.
NOTE: This event is unstable and subject to change.
= Debugging Listeners =
If you're having problems with your listener, try these steps:
- - If you're getting an error about Phabricator being unable to find the
+ - If you're getting an error about Phorge being unable to find the
listener class, make sure you've added it to a libphutil library and
- configured Phabricator to load the library with `load-libraries`.
+ configured Phorge to load the library with `load-libraries`.
- Make sure the listener is registered. It should appear in the "Events" tab
of DarkConsole. If it's not there, you may have forgotten to add it to
`events.listeners`.
- Make sure it calls `listen()` on the right events in its `register()`
method. If you don't listen for the events you're interested in, you
won't get a callback.
- Make sure the events you're listening for are actually happening. If they
occur on a normal page they should appear in the "Events" tab of
DarkConsole. If they occur on a POST, you could add a `phlog()`
to the source code near the event and check your error log to make sure the
code ran.
- You can check if your callback is getting invoked by adding `phlog()` with
a message and checking the error log (see the example after this list).
- You can try listening to `PhutilEventType::TYPE_ALL` instead of a specific
event type to get all events, to narrow down whether problems are caused
by the types of events you're listening to.
- You can edit the `emit_test_event.php` script to emit other types of
events instead, to test that your listener reacts to them properly. You
might have to use fake data, but this gives you an easy way to test
at least the basics.
- For scripts, you can run under `--trace` to see which events are emitted
and how many handlers are listening to each event.
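
As a sketch of the `phlog()` approach mentioned above, you might temporarily
add logging to your listener's callback:

```lang=php
public function handleEvent(PhutilEvent $event) {
  // Temporary debugging: confirm the callback runs and see which
  // events it receives by checking the error log.
  phlog('MyExampleEventListener saw event: '.$event->getType());
}
```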
= Next Steps =
Continue by:
- taking a look at @{class:PhabricatorExampleEventListener}; or
- building a library with @{article:libphutil Libraries User Guide}.
diff --git a/src/docs/user/userguide/external_editor.diviner b/src/docs/user/userguide/external_editor.diviner
index ce39ad6610..1e44aeec34 100644
--- a/src/docs/user/userguide/external_editor.diviner
+++ b/src/docs/user/userguide/external_editor.diviner
@@ -1,78 +1,78 @@
@title User Guide: Configuring an External Editor
@group userguide
Setting up an external editor to integrate with Diffusion and Differential.
Overview
========
You can configure a URI handler to allow you to open files referenced in
Differential and Diffusion in your preferred text editor on your local
machine.
Configuring Editors
===================
To configure an external editor, go to {nav Settings > Application Settings >
External Editor} and set "Editor Link" to a URI pattern (see below). This
will enable an "Open in Editor" link in Differential, and an "Edit" button in
Diffusion.
In general, you'll set this field to something like this, although the
particular pattern to use depends on your editor and environment:
```lang=uri
editor://open/?file=%f
```
Mapping Repositories
====================
-When you open a file in an external editor, Phabricator needs to be able to
+When you open a file in an external editor, Phorge needs to be able to
build a URI which includes the correct absolute path on disk to the local
version of the file, including the repository directory.
If all your repositories are named consistently in a single directory, you
may be able to use the `%n` (repository short name) variable to do this.
For example:
```lang=uri
editor://open/?file=/Users/alice/repositories/%n/%f
```
If your repositories aren't named consistently or aren't in a single location,
you can build a local directory of symlinks which map a repository identifier
to the right location on disk:
```
/Users/alice/editor_links/ $ ls -l
... search-service/ -> /Users/alice/backend/search/
... site-templates/ -> /Users/alice/frontend/site/
```
Then use this directory in your editor URI:
```lang=uri
editor://open/?file=/Users/alice/editor_links/%n/%f
```
Instead of `%n` (repository short name), you can also use `%d` (repository ID)
or `%p` (repository PHID). These identifiers are immutable and all repositories
always have both identifiers, but they're less human-readable.
Configuring: TextMate on macOS
==============================
TextMate installs a `txmt://` handler by default, so it's easy to configure
this feature if you use TextMate.
First, identify the parent directory where your repositories are stored
(for example, `/Users/alice/repositories/`). Then, configure your editor
pattern like this:
```lang=uri
txmt://open/?url=file:///Users/alice/repositories/%n/%f&line=%l
```
diff --git a/src/docs/user/userguide/forms.diviner b/src/docs/user/userguide/forms.diviner
index 034293ff29..bd65007e9d 100644
--- a/src/docs/user/userguide/forms.diviner
+++ b/src/docs/user/userguide/forms.diviner
@@ -1,515 +1,515 @@
@title User Guide: Customizing Forms
@group userguide
-Guide to prefilling and customizing forms in Phabricator applications.
+Guide to prefilling and customizing forms in Phorge applications.
Overview
========
In most applications, objects are created by clicking a "Create" button from
the main list view, and edited by clicking an "Edit" link from the main detail
view. For example, you create a new task by clicking "Create Task", and edit it
by clicking "Edit Task".
The forms these workflows use can be customized to accommodate a number of
different use cases. In particular:
**Prefilling**: You can use HTTP GET parameters to prefill fields or copy
fields from another object. This is a lightweight way to create a link with
some fields set to initial values. For example, you might want to create a
link to create a task which has some default projects or subscribers.
**Custom Forms**: You can create custom forms which can have default values;
locked, hidden, and reordered fields; and additional instructions. This can let
you make specialized forms for creating certain types of objects, like a
"New Bug Report" form with extra help text or a "New Security Issue" form with
locked policies.
**"Create" Defaults**: You can change the default form available to users for
creating objects, or provide multiple default forms for them to choose between.
This can let you simplify or specialize the creation process.
**"Edit" Defaults**: You can change the default form users are given to edit
objects, which will also affect their ability to take inline actions in the
comment form if you're working in an application which supports comments. This
can streamline the edit workflow for less experienced users.
Anyone can use prefilling, but you must have permission to configure an
application in order to modify the application's forms. By default, only
administrators can configure applications.
The remainder of this document walks through configuring these features in
greater detail.
Supported Applications
======================
These applications currently support form customization:
| Application | Support |
|-------------------|---------|
| Maniphest | Full
| Owners | Full
| Paste | Full
| ApplicationEditor | Meta
This documentation is geared toward use in Maniphest because customizing task
creation flows is the most common use case for many of these features, but the
features discussed here work in any application with support.
These features first became available in December 2015. Additional applications
will support them in the future.
Internally, this infrastructure is called `ApplicationEditor`, and the main
component is `EditEngine`. You may see technical documentation, changelogs, or
internal discussion using these terms.
Prefilling
==========
You can prefill the fields in forms by providing HTTP parameters. For example,
if a form has a "Projects" field, you can generally prefill it by adding a
`projects` parameter to the URI like this:
```
https://your.install.com/application/edit/?projects=skunkworks
```
The parameters available in each application vary, and depend on which fields
the application supports.
For full documentation on a particular form, navigate to that form (by
selecting the "Create" or "Edit" action in the application) and then use
{nav Actions > Show HTTP Parameters} to see full details on which parameters
you can use and how to specify them.
You can also use the `template` parameter to copy fields from an existing
object that you have permission to see. Which fields are copied depends on the
application, but usually content fields (like a name or title) are not copied
while other fields (like projects, subscribers, and object states) are.
The {nav Show HTTP Parameters} page has a full list of which fields will be
copied.
You can combine the `template` parameter with other prefilling. The `template`
will act first, then prefilling will take effect. This allows you to overwrite
template values with prefilled values.
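
For example, a link like this would copy fields from object 123, then
overwrite the projects field (assuming the form exposes a `projects`
parameter):

```
https://your.install.com/application/edit/?template=123&projects=skunkworks
```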
Some use cases for this include:
**Lightweight Integrations**: If you want to give users a way to file tasks from
an external application, this is an easy way to get a basic integration
working. For example, you might have a tool for reviewing error logs in
production that has a link to "File a Bug" about an error. The link could
prefill the `title`, `body` and `projects` fields with details about the log
message and a link back into the external tool.
**Convenience**: You can create lightweight, ad-hoc links that make taking
actions a little easier for users. For example, if you're sending out an email
about a change you just made to a lot of people, you could include instructions
like "If you run into any issues, assign a task to me with details: ..." and
include a link which prefills you as the task assignee.
**Searchbar Commands**: If you use a searchbar plugin which gives you shortcut
commands, you can write a custom shortcut so a command like "bug ..." can
quickly redirect you to a prefilled form.
Creating New Forms
==================
Beyond prefilling forms with HTTP parameters, you can create and save form
configurations. This is more heavyweight than prefilling and allows you to
customize, streamline, or structure a workflow more heavily.
You must be able to configure an application in order to manage its forms.
Form configurations can have special names (like "New Bug Report") and
additional instruction text, and may prefill, lock, hide, and reorder fields.
Prefilling and templating still work with custom form configurations, but only
apply to visible fields.
To create a new form configuration, navigate to an existing form via "Create"
or "Edit" and choose {nav Actions > View Form Configurations}. This will show
you a list of current configurations.
You can also edit existing configurations, including the default configuration.
You can use {nav Create Form} from this screen to create a new configuration.
After setting some basic information you will be able to lock, hide, and
reorder form fields, as well as set defaults.
Clicking {nav Use Form} will take you to the permanent URI for this form. You
can link to this form from elsewhere to take the user directly to your
custom flow.
You can adjust defaults using {nav Change Default Values}. These defaults are
saved with the form, and do not require HTTP parameter prefilling. However,
they work in conjunction with prefilling, and you can use prefilling or
templating to overwrite the defaults for visible fields.
If you set a default value for a field and lock or hide the field, the default
you set will still be respected and can not be overridden with templating
or prefilling. This allows you to force certain forms to create tasks with
specific field values, like projects or policies.
You can also set a view policy for a form. Only users who are able to view the
form can use it to create objects.
There are some additional options ("Mark as Create Form" and
"Mark as Edit Form") which are more complicated and explained in greater
detail later in this document.
Some use cases for this include:
**Tailoring Workflows**: If you have certain intake workflows like
"New Bug Report" or "New Security Issue", you can create forms for them with
more structure than the default form.
You can provide detailed instructions and links to documentation in the
"Preamble" for the form configuration. You might use this to remind users about
reporting guidelines, help them fill out the form correctly, or link to other
resources.
You can hide fields that aren't important to simplify the workflow, or reorder
fields to emphasize things that are important. For example, you might want to
hide the "Priority" field on a bug report form if you'd like all bugs to come
in at the default priority before they are triaged.
You can set default view and edit policies, and optionally lock or hide those
fields. This allows you to create a form that is locked to certain policy
settings.
**Simplifying Forms**: If you rarely (or never) use some object fields, you can
create a simplified form by hiding the fields you don't use regularly, or
hide these fields completely from the default form.
Changing Creation Defaults
=========================
You can control which form or forms are presented to users by default when
they go to create new objects in an application.
Using {nav Mark as "Create" Form} from the detail page for a form
configuration, you can mark a form to appear in the create menu.
-When a user visits the application, Phabricator finds all the form
+When a user visits the application, Phorge finds all the form
configurations that are:
- marked as "create" forms; and
- visible to the user based on policy configuration; and
- enabled.
-If there is only one such form, Phabricator renders a single "Create" button.
+If there is only one such form, Phorge renders a single "Create" button.
(If there are zero forms, it renders the button but disables it.)
-If there are several such forms, Phabricator renders a dropdown which allows
+If there are several such forms, Phorge renders a dropdown which allows
the user to choose between them.
You can reorder these forms by returning to the configuration list and using
{nav Reorder Create Forms} in the left menu.
This logic is also used to select items for the global "Quick Create" menu
in the main menu bar.
Some use cases for this include:
**Simplification**: You can modify the default form to reorder fields, add
instructions, or hide fields you never use.
**Multiple Intake Workflows**: If you have multiple distinct intake workflows
like "New Bug Report" and "New Security Issue", you can mark several forms
as "Create" forms and users will be given a choice between them when they go
to create a task.
These flows can provide different instructions and defaults to help users
provide the desired information correctly.
**Basic and Advanced Workflows**: You can create a simplified "Basic" workflow
which hides or locks some fields, and a separate "Advanced" workflow which
has all of the fields.
If you do this, you can also restrict the visibility policy for the "Advanced"
form to experienced users. If you do, newer users will see a button which
takes them to the basic form, while advanced users will be able to choose
between the basic and advanced forms.
Changing Editing Defaults
=========================
You can control which form users are taken to when they click "Edit" on an
object detail page.
Using {nav Mark as "Edit" Form} from the detail page for a form configuration,
you can mark a form as a default edit form.
When a user goes to edit an object, they are taken to the first form which is:
- marked as an "edit" form; and
- visible to them; and
- enabled.
You can reorder forms by going up one level and using {nav Reorder Edit Forms}
in the left menu. This will let you choose which forms have precedence if
a user has access to multiple edit forms.
The default edit form also controls which actions are available inline
in the "Comment" form at the bottom of the detail page, for applications which
support comments. If you hide or lock a field, corresponding actions will not
be available.
Some use cases for this include:
**Simplification**: You can modify the default form to reorder fields, add
instructions, or hide fields you never use.
By default, applications tend to have just one form, which is both an edit form
and a create form. You can split this into two forms (one edit form and one
create form) and then simplify the create form without affecting the edit
form.
You might do this if there are some fields you still want access to that you
never modify when creating objects. For example, you might always want to
create tasks with status "Open", and just hide that field from the create
form completely. A separate edit form can still give you access to these fields
if you want to adjust them later.
**Basic and Advanced Workflows**: You can create a basic edit form (with fewer
fields available) and an advanced edit form, then restrict access to the
advanced form to experienced users.
By ordering the forms as "Advanced", then "Basic", and applying a view policy
to the "Advanced" form, you can send experienced users to the advanced form
and less experienced users to the basic form.
For example, you might use this to hide policy controls or task priorities from
inexperienced users.
Understanding Policies
======================
IMPORTANT: Simplifying workflows by restricting access to forms and fields does
**not** enforce policy controls for those fields.
The configurations described above which simplify workflows are advisory, and
are intended to help users complete workflows quickly and correctly. A user who
has very limited access to an application through forms will generally still be
able to use other workflows (like Conduit, Herald, Workboards, email, and other
applications and integrations) to directly or indirectly modify fields.
For example, even if you lock a user out of all the forms in an application
that have a "Subscribers" field, they can still add subscribers indirectly by
using `@username` mentions.
We do not currently plan to change this or introduce enforced, platform-wide
field-level policy controls. These form customization features are generally
aimed at helping well-intentioned but inexperienced users complete workflows
quickly and correctly.
Disabling Form Configurations
=============================
You can disable a form configuration from the form configuration details screen,
by selecting {nav Disable Form}.
Disabled forms do not appear in any menus by default, and can not be used to
create or edit objects.
Use Case: Specialized Report Form
=================================
A project might want to provide a specialized bug report form for a specific
type of issue. For example, if you have an Android application, you might have
an internal link in that application for employees to "Report a Bug".
A simple way to do this would be to link to the default form and use HTTP
parameter prefilling to set a project. You might end up with a link like this
one:
```
https://your.install.com/maniphest/task/edit/?projects=android
```
A slightly more advanced method is to create a template task, then use it to
prefill the form. For example, you might set some projects, subscribers, and
custom field values on the template task. Then have the application link to
a URI that prefills using the template:
```
https://your.install.com/maniphest/task/edit/?template=123
```
This is a little easier to use, and lets you update the template later if you
want to change anything about the defaults that the new tasks are created
with.
An even more advanced method is to create a new custom form configuration.
You could call this something like "New Android Bug Report". In addition to
setting defaults, you could lock, hide, or reorder fields so that the form
only presents the fields that are relevant to the workflow. You could also
provide instructions to help users file good reports.
After customizing your form configuration, you'd link to the {nav Use Form}
URI, like this:
```
https://your.install.com/maniphest/task/edit/form/123/
```
You can also combine this with templating or prefilling to further specialize
the flow.
Use Case: Simple Report Flow
============================
An open source project might want to give new users a simpler bug report form
with fewer fields and more instructions.
To do this, create a custom form and configure it so it has only the relevant
fields and includes any instructions. Once it looks good, mark it as a "Create"
form.
The "Create Task" button should now change into a menu and show both the
default form and the new simpler form, as well as in the global "Quick Create"
menu in the main menu bar.
If you prefer that the forms appear in a different order, use
{nav Reorder Create Forms} to adjust the display order. (You could also rename
the default creation flow to something like "Create Advanced Task" to guide
users toward the best form).
Use Case: Basic and Advanced Users
==================================
An open source project or a company with a mixture of experienced and less
experienced users might want to give only some users access to adjust advanced
fields like "View Policy" and "Edit Policy" when creating tasks.
Before configuring things like this, make sure you review "Understanding
Policies" above.
To do this, first customize four forms:
- Basic Create
- Advanced Create
- Basic Edit
- Advanced Edit
You can customize these however you'd like.
The "Advanced" forms should have more fields, while the "Basic" forms should
be simpler. You may want to add additional instructions to the "Basic Create"
form.
Then:
- Mark the two "Create" forms as create forms.
- Mark the two "Edit" forms as edit forms.
- Limit the visibility of the two "Advanced" forms to only advanced users
(for example, "Members of Project: Elite Strike Force").
- Use {nav Reorder Edit Forms} to make sure the "Advanced" edit form is at
the top of the list. The first visible form on this list will be used, so
this makes sure advanced users see the advanced edit form.
Basic users should now only have access to basic fields when creating, editing,
and commenting on tasks, while advanced users will retain full access.
Use Case: Security Issues
=========================
If you want to make sure security issues are reported with the correct
policies, you can create a "New Security Issue" form. On this form, prefill the
View and Edit policies and lock or hide them, then lock or hide any additional
fields (like projects or subscribers) that you don't want users to adjust. You
might use a custom policy like this for both the View and Edit policies:
> Allow: Members of Project "Security"
> Allow: Task Author
> Deny all other users
This will make it nearly impossible for users to make policy mistakes, and will
prevent other users from observing these tasks indirectly through Herald rules.
You should review "Understanding Policies" above before pursuing this. In
particular, note that the author may still be able to leak information about
the report like this:
- if they have access to a full-power edit form, they can edit the task
//after// creating it and open the policies; or
- regardless of their edit form access, they can use the Conduit API to
change the task policy; or
- - regardless of any policy controls in Phabricator, they can screenshot,
+ - regardless of any policy controls in Phorge, they can screenshot,
print, or forward email about the task to anyone; or
- regardless of any technical controls in any software, they can decline to
report the issue to you in the first place and sell it on the black market
instead.
The goals of this workflow are to:
- prevent other users from observing security issues improperly through
mechanisms like Herald; and
- prevent mistakes by well-meaning reporters who are unfamiliar with
the software.
It is **not** aimed at preventing reporters who are already in possession of
information from //intentionally// disclosing that information, since they have
many other channels by which to do this anyway and no software can ever prevent
it.
Use Case: Upstream
==================
This section describes the upstream configuration circa December 2015. The
current configuration may not be exactly the same as the one described below.
We run an open source project with a small core team, a moderate number
of regular contributors, and a large public userbase. Access to the upstream
-Phabricator instance is open to the public.
+Phorge instance is open to the public.
Although our product is fairly technical, we receive many bug reports and
feature requests which are of very poor quality. Some users also ignore all the
documentation and warnings and use the upstream instance as a demo/test
instance to click as many buttons as they can.
The goals of our configuration are:
- Provide highly structured "New Bug Report" and "New Feature Request"
workflows which make things as easy as possible to get right, in order
to improve the quality of new reports.
- Separate the userbase into "basic" and "advanced" users. Give the
basic users simpler, more streamlined workflows, to make expectations
more clear, improve report quality, and limit collateral damage from
testing and fiddling.
To these ends, we've configured things like this:
**Community Project**: Advanced users are added to a "Community" project, which
gives them more advanced access. Advanced forms are "Visible To: Members of
Project Community".
**Basic and Advanced Edit**: We have basic and advanced task edit forms.
Members of the community project get access to the advanced one, while other
users only have access to the basic one.
**Bug, Feature and Advanced Create**: We have "New Bug", "New Feature" and
"New Advanced Task" creation forms.
The advanced form is the standard creation form, and is only accessible to
community members.
The basic forms have fewer fields, and each form provides tailored instructions
which point users at relevant documentation to help them provide good reports.
The basic versions of these forms also have their "Edit Policy" locked down to
members of the "Community" project and the task author. This means that users
generally can't mess around with other users' reports, but more experienced
users can still help manage and resolve tasks.
diff --git a/src/docs/user/userguide/harbormaster.diviner b/src/docs/user/userguide/harbormaster.diviner
index a6f2047fdd..cf4b234e82 100644
--- a/src/docs/user/userguide/harbormaster.diviner
+++ b/src/docs/user/userguide/harbormaster.diviner
@@ -1,230 +1,230 @@
@title Harbormaster User Guide
@group userguide
Guide to Harbormaster, a build and continuous integration application.
Overview
========
WARNING: Harbormaster is still very rough. Read this document carefully to
understand what it can and can not do and what to expect in the future.
The Harbormaster application provides build and continuous integration support
-for Phabricator.
+for Phorge.
Harbormaster is not a mature application. You should expect it to have major
missing capabilities and to change substantially over time. The current version
of Harbormaster can perform some basic build tasks, but has many limitations
and is not a complete build platform.
In particular, you should be aware of these common limitations:
- **Creating Build Plans**: Harbormaster ships with only very basic, crude
tools for writing build plans. There are no default integrations with
`arc unit` or systems like Jenkins. Build plans are likely to change
substantially over time.
- **Triggering Builds**: Harbormaster can only trigger builds through Herald
rules. It can not currently run periodic builds.
- **Executing Builds**: Harbormaster can only execute builds in a remote
system, like Jenkins. It can not currently host builds.
- **Change Handoff**: Change handoff is covered in rough edges and tradeoffs.
Harbormaster Basics
===================
Use Harbormaster to run builds or tests on code reviews and commits. In general,
the Harbormaster workflow looks like this today:
- You create a new "Build Plan" which describes how to build a project (which
tests to run, which commands to execute, etc).
- You configure Harbormaster to trigger the plan when relevant code reviews
are created or relevant commits are pushed or discovered.
- Harbormaster executes the plan and reports the results, allowing you to see
if a change or commit breaks tests.
The remainder of this document walks through these steps in more detail.
Concepts and Terminology
========================
Harbormaster uses these concepts to describe builds:
- **Build Step**: Describes a single step in a build process, like running a
command.
- **Build Plan**: A collection of build steps which describe a build process.
You'll create build plans to tell Harbormaster which commands it needs to
run to perform a build.
- **Buildable**: A reference to an object from another application which can
have builds run against it. In the upstream, code reviews (from
Differential) and commits (from Diffusion) are buildable.
- **Build**: Created by running a build plan against a buildable. Collects
results from running build commands and shows build progress, status and
results. A build describes what happened when an entire build plan was
run.
- **Build Target**: Builds are made up of build targets, which are created
automatically when Harbormaster runs the individual steps in a build. A
build target describes what happened when a specific build step was run.
Creating a Build Plan
=====================
NOTE: Build plans are currently crude and subject to change in future versions
of Harbormaster.
A build plan tells Harbormaster how to run a build: which commands to run,
services to call, and so on. Builds start with a build plan.
To create a build plan, navigate to {nav Harbormaster > Manage Build Plans >
New Build Plan}.
Build plans are composed of "Build Steps". Each step describes an individual
action (like running a command) and the sequence of steps as a whole comprise
the plan. For example, you might want to run one command to build a binary,
then a second command to execute unit tests. Add steps to your build plan
with {nav Add Build Step}.
Currently, the only useful type of build step is "Make HTTP Request", which you
can use to make a call to an external build system like Jenkins. Today, most
plans should therefore look something like this:
- Use a "Make HTTP Request" step to tell Jenkins or some other similar
external build system about the code.
- Have the build step "Wait for Message" after the external system is
notified.
- Write custom code on the build server to respond to the request, run a
- build, then report the results back to Phabricator by calling the
+ build, then report the results back to Phorge by calling the
`harbormaster.sendmessage` Conduit API.
You'll need to write a nontrivial amount of code to get this working today.
In the future, Harbormaster will become more powerful and have more builtin
support for interacting with build systems.
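As a very rough sketch, the final reporting step on the build server might
use `arc call-conduit` like this (the PHID, install URI and token are
placeholders, and your build system may prefer to call the API over HTTP
directly):

  $ echo '{"buildTargetPHID": "<target PHID>", "type": "pass"}' | \
      arc call-conduit --conduit-uri <install URI> \
      --conduit-token <token> harbormaster.sendmessage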
Triggering Builds
=================
NOTE: Harbormaster can not currently watch a branch (like "build 'master' every
time it changes") or run periodic builds (like "build every hour"). These
capabilities may be added in the future.
You can run builds manually by using {nav Run Plan Manually} from the detail
screen of a build plan. This will execute a manual build immediately, and can
be used to test that plans work properly.
To trigger a build automatically, write a Herald rule which executes the "Run
build plans" action. The simplest rule would just use the "Always" condition
and run a single build plan, but you can use more complex conditions to control
which plans run on which code.
This action is available for commits and revisions, as either can be built
with Harbormaster. This action is only available for "Project" or "Global"
rules.
Change Handoff
==============
NOTE: Change handoff is currently very rough. It may improve in the future.
If you want to build code reviews in an external system, it will need to be
able to construct a working copy with the changes before it can build them.
There are three ways to do this:
- **Automatic Staging Areas**: Recommended. This is the simplest and
cleanest way to hand changes to an external build system.
- **Manual Staging Areas**: Recommended if you can not use automatic
staging areas. This is a simple way to hand changes to an external build
system, but not as clean as automatic staging areas.
- **`arc patch`**: Not recommended. This mechanism is the most difficult to
configure and debug, and is not nearly as reliable as handoff via staging
areas.
With staging areas, `arc` pushes a copy of the local changes somewhere as a
side effect of running `arc diff`. In Git, it pushes changes to a tag like
`phabricator/diff/123` in a designated remote.
The build system can then interact with this copy using normal VCS commands.
This is simpler to configure, use, troubleshoot and work with than `arc patch`.
-With `arc patch`, the build system downloads patches from Phabricator and
+With `arc patch`, the build system downloads patches from Phorge and
applies them to a local working copy. This is more complex and more error-prone
than staging areas.
**Automatic Staging Areas**: This is the recommended mechanism for change
handoff. This mechanism has not been built yet, so you can not use it.
**Manual Staging Areas**: If you can not use automatic staging areas, manual
staging areas are the next best approach. Manual staging areas are only
supported under Git, but work with both hosted and imported repositories.
Manual staging areas work like this:
- You configure a staging area for the repository you want to be able to
run builds for. A staging area is just a remote repository that you're
designating for temporary storage.
- Once a staging area is configured, `arc diff` will automatically push a
copy of the changes to the staging area as a side effect when creating
and updating reviews.
- Your build system can pull changes directly from the configured staging
area.
Configure a staging area by navigating to {nav Diffusion >
(Choose a Repository) > Edit Repository > Edit Staging}. You'll enter the
remote URI of a repository to use as a staging area, and `arc diff` will push
changes to tags like `phabricator/diff/123`.
There are several ways to select a staging area:
- You can use the repository itself as its own staging area, but this will
clog it up with a lot of tags that users probably don't care about. This is
simplest to configure but will be disruptive and potentially confusing to
users.
- You can create a single staging repository and have all other
repositories use it as a staging area. This is simple to configure and
won't disrupt or confuse users, but you won't be able to set granular
permissions on the staging repository: all the staged changes in a
repository are visible to anyone who has access to the repository, even if
they came from a repository that the viewer does not have access to.
- You can create a staging repository for each standard repository. This will
give you the most control, but is also the most time consuming to configure.
- You can use a hybrid approach and have several staging repositories, each
of which is used for one or more standard repositories. This will let you
strike a balance between setup costs and granularity.
- Using automatic staging areas avoids all this complexity by using the
repository as its own staging area but hiding the tags from users.
Once you've configured a staging area, have your build system clone the staging
area repository and do a checkout of the relevant tag in order to perform a
build.
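For example, with Git, the build system might do something like this (the
staging URI and diff ID are placeholders):

  $ git clone <staging area URI> staging
  $ cd staging
  $ git checkout phabricator/diff/123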
**`arc patch`**: You can also have the build system pull changes out of
-Phabricator as patches and apply them with `arc patch`. This mechanism is the
+Phorge as patches and apply them with `arc patch`. This mechanism is the
most complex to configure and debug, and is much less reliable than using
staging areas. It is not recommended.
To use `arc patch`-based handoff, install PHP on your build server and set up
`arc`. Create a "bot" user for your build system and generate a Conduit token
in {nav Settings > Conduit API Tokens}. Then have your build system clone the
repository and run `arc patch` to apply the changes:
$ arc patch --conduit-token <token> --diff <diff-id>
This will usually work, but is more complex and less reliable than using a
staging area.
Troubleshooting
===============
You can troubleshoot Harbormaster by using `bin/harbormaster` from the command
line. Run it as `bin/harbormaster help` for details.
In particular, you can run manual builds in the foreground from the CLI to see
more details about what they're doing:
- phabricator/ $ ./bin/harbormaster build D123 --plan 456 --trace
+ phorge/ $ ./bin/harbormaster build D123 --plan 456 --trace
This may help you understand or debug issues with a build plan.
diff --git a/src/docs/user/userguide/herald.diviner b/src/docs/user/userguide/herald.diviner
index b7cfdeca95..a3cfa26f54 100644
--- a/src/docs/user/userguide/herald.diviner
+++ b/src/docs/user/userguide/herald.diviner
@@ -1,178 +1,178 @@
@title Herald User Guide
@group userguide
Use Herald to get notified of changes you care about.
Overview
========
Herald allows you to write rules which run automatically when objects (like
tasks or commits) are created or updated. For instance, you might want to get
notified every time someone sends out a revision that affects some file you're
interested in, even if they didn't add you as a reviewer.
One way to think about Herald is that it is a lot like the mail rules you can
set up in most email clients to organize mail based on "To", "Subject", etc.
-Herald works very similarly, but operates on Phabricator objects (like revisions
+Herald works very similarly, but operates on Phorge objects (like revisions
and commits) instead of emails.
For example, you can write a personal rule like this which triggers on tasks:
> When [ all of ] these conditions are met:
> [ Title ][ contains ][ quasar ]
> Take these actions [ every time this rule matches: ]
> [ Add me as a subscriber ]
This rule will automatically subscribe you to any newly created or updated
tasks that contain "quasar" in the title.
Herald rules are often used to: notify users, add reviewers, initiate audits,
classify objects, block commits, enforce CLAs, and run builds.
Working with Rules
==================
To create new Herald rules, navigate to the {nav Herald} application and select
{nav Create Herald Rule}.
Next, you'll choose an event that you want to write a rule for: for example,
a rule for when commits are discovered or a rule for when tasks are created or
updated.
After selecting an event, choose the type of rule to create. See "Rule Types"
below for a more detailed discussion.
Name the rule and provide conditions and actions. When events occur, the rule
will be evaluated automatically. If the conditions pass, the actions will be
taken.
To test rules, use {nav Herald > Test Console}. See "Testing Rules" below
for greater detail.
To review which rules did or did not trigger for a particular event (and why),
see {nav Herald > Transcripts}.
Rule Types
==========
You can create three kinds of Herald rules: personal rules, object rules, and
global rules.
- **Personal Rules** are rules owned by an individual. They're often used
to keep people informed about changes they're interested in.
- **Object Rules** are rules associated with an object (like a repository
or project). These are similar to global rules.
- **Global Rules** are rules which apply to all objects. They're often used to block
commits or run builds.
Rule Policies
=============
All Herald rules are always visible to all users.
The edit policy for a rule depends on what type of rule it is:
- Personal rules are owned by a particular user, and can only be created or
edited by that user.
- Object rules are associated with a particular object (like a repository),
and can only be created or edited by users who can edit that object. That
is, if you can edit a repository, you can also create object rules for it
and edit existing object rules.
- Global rules are administrative and can only be created or edited by users
with the **Can Manage Global Rules** Herald application permission.
When rules are about to evaluate, they may first perform some policy tests.
- Personal rules check if the owning user can see the object which the rule
is about to run on. If the user can not see the object, the rule does not
run. This prevents individuals from writing rules which give them access
to information they don't have permission to see.
- Object and global rules **bypass policies** and always execute. This makes
them very powerful, and is why the **Can Manage Global Rules** policy is
restricted by default.
Testing Rules
=============
When you've created a rule, use the {nav Herald > Test Console} to test it out.
Enter an object name (like `D123`, `rXYZabcdef`, or `T456`) and Herald will
execute a dry run against that object, showing you which rules //would// match
had it actually been updated. Dry runs executed via the test console don't take
any actions.
Action Repetition Settings
==========================
Rules can be configured to act in different ways:
**Every time the rule matches:** The rule will take actions every time the
object is updated if the rule's conditions match the current object state.
**Only the first time the rule matches:** The rule will take actions only once
per object, regardless of how many times the object is updated. After the rule
acts once, it won't run on the same object again.
**If this rule did not match the last time:** This rule will take actions the
first time it matches for an object. After that, it won't act unless the object
just changed from not matching to matching.
For example, suppose you have a rule like this:
> When:
> [ Title ][ contains ][ duck ]
> Take actions [ if this rule did not match the last time: ]
> [ Add comment ][ "Please prefer the term 'budget goose'." ]
If you set this rule to act "every time", it will leave a comment on the task
for every single update until the title is edited. This is usually pretty noisy.
If you set this rule to act "only the first time", it will only leave one
comment. This fixes the noise problem, but creates a new problem: if someone
edits the title, then a later change breaks it again, the rule won't leave
another reminder comment.
If you set this rule to act "if it did not match the last time", it will leave
one comment on matching tasks. If the task is fixed (by replacing the term
"duck" with the term "budget goose", so the object no longer matches the rule)
and then later changed to violate the rule again (by putting the term
"duck" back in the title), the rule will act again.
Advanced Herald
===============
A few features in Herald are particularly complicated or unintuitive.
Condition **matches regexp pair**: Some conditions allow you to select the
operator "matches regexp pair". For example, you can write a rule against
revisions like this one:
> When [ all of ] these conditions are met:
> [ Changed file content ][ matches regexp pair ][ ... ]
This condition allows you to specify two regexes in JSON format. The first will
be used to match the filename of the changed file; the second will be used to
match the content. You can use these together to express conditions like
"content in Javascript files".
For example, if you want to match revisions which add or remove calls to a
"muffinize" function, //but only in JS files//, you can set the value to
`["/\\.js$/", "/muffinize/"]` or similar. This condition is satisfied only
when the filename matches the first expression and the content matches the
second expression.
**Another Herald rule**: you can create Herald rules which depend on other
rules.
This can be useful if you need to express a more complicated condition
than "all" vs "any" allows, or have a common set of conditions which you want
to share between several rules.
If a rule is only being used as a group of conditions, you can set the action
to "Do Nothing".
diff --git a/src/docs/user/userguide/jump.diviner b/src/docs/user/userguide/jump.diviner
index 0421f194a8..8e6277303b 100644
--- a/src/docs/user/userguide/jump.diviner
+++ b/src/docs/user/userguide/jump.diviner
@@ -1,39 +1,39 @@
@title Search User Guide: Shortcuts
@group userguide
Command reference for global search shortcuts.
Overview
========
-Phabricator's global search bar automatically interprets certain commands as
+Phorge's global search bar automatically interprets certain commands as
shortcuts to make it easy to navigate to specific places.
To use these shortcuts, just type them into the global search bar in the main
menu and press return. For example, enter `T123` to jump to the corresponding
task quickly.
Supported Commands
========
- **T** - Jump to Maniphest.
- **T123** - Jump to Maniphest Task 123.
- **D** - Jump to Differential.
- **D123** - Jump to Differential Revision 123.
- **r** - Jump to Diffusion.
- **rXYZ** - Jump to Diffusion Repository XYZ.
- **rXYZabcdef** - Jump to Diffusion Commit rXYZabcdef.
- **r <name>** - Search for repositories by name.
- **u** - Jump to People.
- **u username** - Jump to username's Profile.
- **p** - Jump to Project.
- **p Some Project** - Jump to Project: Some Project.
- **s SymbolName** - Jump to Symbol SymbolName.
- **(default)** - Search for input.
Next Steps
==========
Continue by:
- returning to the @{article:Search User Guide}.
diff --git a/src/docs/user/userguide/legalpad.diviner b/src/docs/user/userguide/legalpad.diviner
index 053f275d9f..cd5f6578e9 100644
--- a/src/docs/user/userguide/legalpad.diviner
+++ b/src/docs/user/userguide/legalpad.diviner
@@ -1,135 +1,125 @@
@title Legalpad User Guide
@group userguide
Using Legalpad to track agreements and signatures on legal documents.
Overview
========
Legalpad is a simple application for tracking signatures on legal agreements.
You can add legal documents, users can sign them, and you can keep track of who
has signed what.
Right now, it is primarily useful for open source projects that have a
Contributor License Agreement or a similar document which needs to be signed
before changes can be accepted from contributors. In particular, it has
integrations into Differential which can block changes from being accepted until
the author has signed the required documents.
NOTE: Legalpad is a basic application, and missing many of the features of more
general document signing software. It may be useful to help you do things
beyond track CLAs, but you should evaluate its capabilities carefully.
Documents
=========
The primary object in legalpad is the //Legalpad Document//, which represents
a written agreement, contract, policy, or other similar document.
Most fields of a document are relatively straightforward, but some are unique
to the application:
**Who Should Sign?** This field controls what kind of signatures the document
accepts. You can choose either **Individuals** (users will be prompted to sign
with their name), **Corporations** (users will be prompted to enter information
identifying the corporation they are signing on behalf of) or **No One** (for
policy documents or other documents which do not require a signature).
**Require Signature** This field allows you to create a document which all of
-your users must sign before they can use Phabricator, like a terms of service
+your users must sign before they can use Phorge, like a terms of service
document. See "Use Case: Terms of Service" below for details. These documents
must be signable by individuals.
Use Case: Requiring a CLA
===============
Open source projects often require contributors to sign a license agreement
before their contributions can be accepted to the project. To require a CLA or
similar document for an open source project:
- Create a CLA document in Legalpad.
- Create a "Global" Herald rule which triggers "Always".
- The rule should take the action "Require legal signatures", specifying
your CLA document as the required document.
After you've done this, all new reviews created in Differential by authors who
have not signed the document will trigger a signature requirement. These reviews
can not be accepted until the document has been signed.
The content of these revisions will also be hidden until the document has been
signed. This prevents reviewers from being tainted by examining the changes if
the author ultimately declines to sign the CLA.
If the author has already signed all of the required documents, Herald will not
take any actions. This reduces the amount of noise the CLA process generates for
regular contributors.
You can require more than one document (to require that they all be signed), if
you have several agreements that contributors must sign.
Alternatively, if you have several different sets of agreements for different
projects, you can also choose a more narrow Herald condition than "Always" (for
example, require a signature only if the revision is against certain
repositories).
Use Case: Terms of Service
=================================
If you have a "Terms of Service" document that you'd like users to agree to
before they're allowed to use your install, you can add it to Legalpad and then
check the **Require Signature** box for the document.
After logging in, users will need to agree to the document before they can
do other things with their account (you'll need to agree to it, too, as soon
as you save your changes, so that will give you a sense of the workflow).
Note that although users who have not signed all of the required documents can
-not use most Phabricator functions, they can browse other Legalpad documents
+not use most Phorge functions, they can browse other Legalpad documents
that they have permission to see. This allows a terms document to be
supplemented with additional policy or guideline documents that users are free
to review before agreeing to the terms.
Use Case: Document-Based Policies
=======================
If you have a document like an NDA, you can write a policy rule which prevents
users from seeing content until they sign the document:
- In any policy control ("Visible To", "Editable By"), choose "Custom Policy".
- Add a rule like "Allow signers of legalpad documents: X".
- Leave the default rule as "Deny all other users".
- Save the policy.
Users will now only be able to take the action (for example, view or edit the
object) if they have signed the specified documents.
Adding Exemptions
=================
If you have users who have signed an alternate form of a document (for example,
you have a hard copy on file), or an equivalent document, or who are otherwise
exempt from needing to sign a document in Legalpad, you can add a signature
exemption for them.
Other applications will treat users with a signature exemption as though they
had signed the document, although the UI will show the signature as an exemption
rather than a normal signature.
To add an exemption, go to **Manage Document**, then **View Signatures**, then
**Add Signature Exemption**.
You can optionally add notes about why a user is exempt from signing a document.
To review the notes later (and see who added the exemption), click the colored
asterisk in the list view.
-
-
-Roadmap
-========
-
-You can find discussion about the Legalpad roadmap here:
-
-https://secure.phabricator.com/T5505
-
-If there are features you'd like to see, let us know.
diff --git a/src/docs/user/userguide/mail_rules.diviner b/src/docs/user/userguide/mail_rules.diviner
index cdbbf1919d..5eb20043a5 100644
--- a/src/docs/user/userguide/mail_rules.diviner
+++ b/src/docs/user/userguide/mail_rules.diviner
@@ -1,69 +1,69 @@
-@title User Guide: Managing Phabricator Email
+@title User Guide: Managing Phorge Email
@group userguide
-How to effectively manage Phabricator email notifications.
+How to effectively manage Phorge email notifications.
Overview
========
-Phabricator uses email as a major notification channel, but the amount of email
+Phorge uses email as a major notification channel, but the amount of email
it sends can seem overwhelming if you're working on an active team. This
document discusses some strategies for managing email.
By far the best approach to managing mail is to **write mail rules** to
categorize mail. Essentially all modern mail clients allow you to quickly
write sophisticated rules to route, categorize, or delete email.
Reducing Email
==============
You can reduce the amount of email you receive by turning off some types of
email in {nav Settings > Email Preferences}. For example, you can turn off email
produced by your own actions (like when you comment on a revision), and some
types of less-important notifications about events.
Mail Rules
==========
The best approach to managing mail is to write mail rules. Simply writing rules
to move mail from Differential, Maniphest and Herald to separate folders will
vastly simplify mail management.
-Phabricator also adds mail headers (see below) which can allow you to write
+Phorge also adds mail headers (see below) which can allow you to write
more sophisticated mail rules.
Mail Headers
============
-Phabricator sends various information in mail headers that can be useful in
+Phorge sends various information in mail headers that can be useful in
crafting rules to route and manage mail. To see a full list of headers, use
the "View Raw Message" feature in your mail client.
The most useful header for routing is generally `X-Phabricator-Stamps`. This
is a list of attributes which describe the object the mail is about and the
actions which the mail informs you about.
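As a purely illustrative sketch (the stamp names and values below are
hypothetical, and the exact stamps on a real message depend on the object and
the actions involved), a stamps header has roughly this shape:
```lang=text
X-Phabricator-Stamps: actor(@alice) application(Differential)
  author(@alice) monogram(D123) reviewer(@bob) via(web)
```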
Stamps and Gmail
================
If you use a client which can not perform header matching (like Gmail), you can
change the {nav Settings > Email Format > Send Stamps} setting to include the
stamps in the mail body and then match them with body rules.
When writing filter rules against mail stamps in Gmail, you should quote any
filters you want to apply. For example, specify rules like this, with quotes:
> "author(@alice)"
Note that Gmail will ignore some symbols when matching mail against filtering
rules, so you can get false positives if the body of the message includes text
like `author alice` (the same words in the same order, without the special
symbols).
You'll also get false positives if the message body includes the text of a
mail stamp explicitly in a normal text field like a summary, description, or
comment.
There's no way to avoid these false positives other than using a different
client with support for more powerful filtering rules, but these false
positives should normally be uncommon.
diff --git a/src/docs/user/userguide/multi_factor_auth.diviner b/src/docs/user/userguide/multi_factor_auth.diviner
index eca85d0f92..cccf3e12b9 100644
--- a/src/docs/user/userguide/multi_factor_auth.diviner
+++ b/src/docs/user/userguide/multi_factor_auth.diviner
@@ -1,222 +1,222 @@
@title User Guide: Multi-Factor Authentication
@group userguide
-Explains how multi-factor authentication works in Phabricator.
+Explains how multi-factor authentication works in Phorge.
Overview
========
Multi-factor authentication allows you to add additional credentials to your
account to make it more secure.
Once multi-factor authentication is configured on your account, you'll usually
use your mobile phone to provide an authorization code or an extra confirmation
when you try to log in to a new session or take certain actions (like changing
your password).
Requiring you to prove you're really you by asking for something you know (your
password) //and// something you have (your mobile phone) makes it much harder
for attackers to access your account. The phone is an additional "factor" which
protects your account from attacks.
How Multi-Factor Authentication Works
=====================================
If you've configured multi-factor authentication and try to log in to your
account or take certain sensitive actions (like changing your password),
you'll be stopped and asked to enter additional credentials.
Usually, this means you'll receive an SMS with an authorization code on your
phone, or you'll open an app on your phone which will show you an authorization
code or ask you to confirm the action. If you're given an authorization code,
-you'll enter it into Phabricator.
+you'll enter it into Phorge.
-If you're logging in, Phabricator will log you in after you enter the code.
+If you're logging in, Phorge will log you in after you enter the code.
-If you're taking a sensitive action, Phabricator will sometimes put your
+If you're taking a sensitive action, Phorge will sometimes put your
account in "high security" mode for a few minutes. In this mode, you can take
sensitive actions like changing passwords or SSH keys freely, without
entering any more credentials.
You can explicitly leave high security once you're done performing account
management, or your account will naturally return to normal security after a
short period of time.
While your account is in high security, you'll see a notification on screen
with instructions for returning to normal security.
Configuring Multi-Factor Authentication
=======================================
To manage authentication factors for your account, go to
{nav Settings > Multi-Factor Auth}. You can use this control panel to add
or remove authentication factors from your account.
You can also rename a factor by clicking the name. This can help you identify
factors if you have several similar factors attached to your account.
For a description of the available factors, see the next few sections.
Factor: Mobile Phone App (TOTP)
===============================
TOTP stands for "Time-based One-Time Password". This factor operates by having
-you enter authorization codes from your mobile phone into Phabricator. The codes
+you enter authorization codes from your mobile phone into Phorge. The codes
change every 30 seconds, so you will need to have your phone with you in order
to enter them.
To use this factor, you'll download an application onto your smartphone which
can compute these codes. Two applications which work well are **Authy** and
**Google Authenticator**. These applications are free, and you can find and
download them from the appropriate store on your device.
Your company may have a preferred application, or may use some other
application, so check any in-house documentation for details. In general, any
TOTP application should work properly.
-After you've downloaded the application onto your phone, use the Phabricator
+After you've downloaded the application onto your phone, use the Phorge
settings panel to add a factor to your account. You'll be prompted to scan a
QR code, and then read an authorization code from your phone and type it into
-Phabricator.
+Phorge.
Later, when you need to authenticate, you'll follow this same process: launch
-the application, read the authorization code, and type it into Phabricator.
+the application, read the authorization code, and type it into Phorge.
This will prove you have your phone.
-Don't lose your phone! You'll need it to log into Phabricator in the future.
+Don't lose your phone! You'll need it to log into Phorge in the future.
Factor: SMS
===========
This factor operates by texting you a short authorization code when you try to
log in or perform a sensitive action.
To use SMS, first add your phone number in {nav Settings > Contact Numbers}.
Once a primary contact number is configured on your account, you'll be able
to add an SMS factor.
To enroll in SMS, you'll be sent a confirmation code to make sure your contact
number is correct and SMS is being delivered properly. Enter it when prompted.
When you're asked to confirm your identity in the future, you'll be texted
an authorization code to enter into the prompt.
(WARNING) SMS is a very weak factor and can be compromised or intercepted. For
details, see: <https://phurl.io/u/sms>.
Factor: Duo
===========
This factor supports integration with [[ https://duo.com/ | Duo Security ]], a
third-party authentication service popular with enterprises that have a lot of
policies to enforce.
To use Duo, you'll install the Duo application on your phone. When you try
to take a sensitive action, you'll be asked to confirm it in the application.
Administration: Configuration
=============================
-New Phabricator installs start without any multi-factor providers enabled.
+New Phorge installs start without any multi-factor providers enabled.
Users won't be able to add new factors until you set up multi-factor
authentication by configuring at least one provider.
Configure new providers in {nav Auth > Multi-Factor}.
Providers may be in these states:
- **Active**: Users may add new factors. Users will be prompted to respond
to challenges from these providers when they take a sensitive action.
- **Deprecated**: Users may not add new factors, but they will still be
asked to respond to challenges from existing factors.
- **Disabled**: Users may not add new factors, and existing factors will
not be used. If MFA is required and a user only has disabled factors,
they will be forced to add a new factor.
If you want to change factor types for your organization, the process will
normally look something like this:
- Configure and test a new provider.
- Deprecate the old provider.
- Notify users that the old provider is deprecated and that they should move
to the new provider at their convenience, but before some upcoming
deadline.
- Once the deadline arrives, disable the old provider.
Administration: Requiring MFA
=============================
As an administrator, you can require all users to add MFA to their accounts by
setting the `security.require-multi-factor-auth` option in Config.
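As a sketch, you can also set this option from the command line with
`bin/config` (assuming you have CLI access to the install); the Config web UI
works equally well:
```lang=console
# Require all users to enroll in multi-factor authentication.
phorge/ $ ./bin/config set security.require-multi-factor-auth true
```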
Administration: Recovering from Lost Factors
============================================
If a user has lost a factor associated with their account (for example, their
phone has been lost or damaged), an administrator with host access can strip
the factor off their account so that they can log in without it.
IMPORTANT: Before stripping factors from a user account, be absolutely certain
that the user is who they claim to be!
It is important to verify the user is who they claim to be before stripping
factors because an attacker might pretend to be a user who has lost their phone
in order to bypass multi-factor authentication. It is much easier for a typical
attacker to spoof an email with a sad story in it than it is for a typical
attacker to gain access to a mobile phone.
A good way to verify user identity is to meet them in person and have them
solemnly swear an oath that they lost their phone and are very sorry and
definitely won't do it again. You can also work out a secret handshake in
advance and require them to perform it. But no matter what you do, be certain
the user (not an attacker //pretending// to be the user) is really the one
making the request before stripping factors.
After verifying identity, administrators with host access can strip
authentication factors from user accounts using the `bin/auth strip` command.
For example, to strip all factors from the account of a user who has lost
their phone, run this command:
```lang=console
# Strip all factors from a given user account.
-phabricator/ $ ./bin/auth strip --user <username> --all-types
+phorge/ $ ./bin/auth strip --user <username> --all-types
```
You can run `bin/auth help strip` for more detail and all available flags and
arguments.
This command can selectively strip factors by factor type. You can use
`bin/auth list-factors` to get a list of available factor types.
```lang=console
# Show supported factor types.
-phabricator/ $ ./bin/auth list-factors
+phorge/ $ ./bin/auth list-factors
```
Once you've identified the factor types you want to strip, you can strip
matching factors by using the `--type` flag to specify one or more factor
types:
```lang=console
# Strip all SMS and TOTP factors for a user.
-phabricator/ $ ./bin/auth strip --user <username> --type sms --type totp
+phorge/ $ ./bin/auth strip --user <username> --type sms --type totp
```
The `bin/auth strip` command can also selectively strip factors for certain
providers. This is more granular than stripping all factors of a given type.
You can use `bin/auth list-mfa-providers` to get a list of providers.
Once you have a provider PHID, use `--provider` to select factors to strip:
```lang=console
# Strip all factors for a particular provider.
-phabricator/ $ ./bin/auth strip --user <username> --provider <providerPHID>
+phorge/ $ ./bin/auth strip --user <username> --provider <providerPHID>
```
diff --git a/src/docs/user/userguide/multimeter.diviner b/src/docs/user/userguide/multimeter.diviner
index 6aab740486..e2f242bcdc 100644
--- a/src/docs/user/userguide/multimeter.diviner
+++ b/src/docs/user/userguide/multimeter.diviner
@@ -1,99 +1,99 @@
@title Multimeter User Guide
@group userguide
Using Multimeter, a sampling profiler.
Overview
========
IMPORTANT: This document describes a prototype application.
Multimeter is a sampling profiler that can give you coarse information about
-Phabricator resource usage. In particular, it can help quickly identify sources
+Phorge resource usage. In particular, it can help quickly identify sources
of load, like bots or scripts which are making a very large number of requests.
Configuring and Using Multimeter
================================
To access Multimeter, go to {nav Applications > Multimeter}.
By default, Multimeter samples 0.1% of pages. This should be a reasonable rate
for most installs, but you can increase or decrease the rate by adjusting
`debug.sample-rate`. Increasing the rate (by setting the value to a lower
number, like 100, to sample 1% of pages) will increase the granularity of the
data, at a small performance cost.
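For example, to sample 1% of pages instead of the default 0.1%, you could set
the option like this (a sketch using the `bin/config` CLI; you can also change
it from the Config application):
```lang=console
# Sample 1 request in every 100 (1% of pages).
phorge/ $ ./bin/config set debug.sample-rate 100
```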
Using Multimeter
================
-Multimeter shows you what Phabricator has spent time doing recently. By
+Multimeter shows you what Phorge has spent time doing recently. By
looking at the samples it collects, you can identify major sources of load
or resource use, whether they are specific users, pages, subprocesses, or
other types of activity.
By identifying and understanding unexpected load, you can adjust usage patterns
or configuration to make better use of resources (for example, rewrite bots
that are making too many calls), or report specific, actionable issues to the
upstream for resolution.
-The main screen of Multimeter shows you everything Phabricator has spent
+The main screen of Multimeter shows you everything Phorge has spent
resources on recently, broken down by action type. Categories are folded up
by default, with "(All)" labels.
To filter by a dimension, click the link for it. For example, from the main
page, you can click "Web Request" to filter by only web requests. To expand a
grouped dimension, click the "(All)" link.
For example, suppose we suspect that someone is running a bot that is making
a lot of requests and consuming a lot of resources. We can get a better idea
about this by filtering the results like this:
- Click {nav Web Request}. This will show only web requests.
- Click {nav (All)} under "Viewer". This will expand events by viewer.
Recent resource costs for web requests are now shown, grouped and sorted by
user. The usernames in the "Viewer" column show who is using resources, in
order from greatest use to least use (only administrators can see usernames).
The "Avg" column shows the average cost per event, while the "Cost" column
shows the total cost.
If the top few users account for similar costs and are normal, active users,
there may be nothing amiss and your problem might lie elsewhere. If a user like
`slowbot` is in the top few users and has way higher usage than anyone else,
there might be a script running under that account consuming a disproportionate
amount of resources.
Assuming you find a user with unusual usage, you could dig into their usage
like this:
- Click their name (like {nav slowbot}) to filter to just their requests.
- Click {nav (All)} under "Label". This expands by request detail.
This will show exactly what they spent those resources doing, and can help
identify if they're making a lot of API calls or scraping the site or whatever
else.
This is just an example of a specific kind of problem that Multimeter could
help resolve. In general, exploring Multimeter data by filtering and expanding
resource uses can help you understand how resources are used and identify
unexpected uses of resources. For example:
- Identify a problem with load balancing by filtering on {nav Web Request}
and expanding on {nav Host}. If hosts aren't roughly even, DNS or a load
balancer is misconfigured.
- Identify which pages cost the most by filtering on {nav Web Request}
and expanding on {nav Label}.
- Find outlier pages by filtering on {nav Web Request} and expanding on
{nav ID}.
- Find where subprocesses are invoked from by filtering on {nav Subprocesses},
then expanding on {nav Context}.
Next Steps
==========
Continue by:
- understanding and reporting performance issues with
@{article:Troubleshooting Performance Problems}.
diff --git a/src/docs/user/userguide/phame.diviner b/src/docs/user/userguide/phame.diviner
index d42ddc7d23..2889f5240a 100644
--- a/src/docs/user/userguide/phame.diviner
+++ b/src/docs/user/userguide/phame.diviner
@@ -1,121 +1,121 @@
@title Phame User Guide
@group userguide
Phame is a blogging platform.
Overview
========
Phame is a simple platform for writing blogs and blog posts. Content published
-through Phame is integrated with other Phabricator applications (like Feed,
+through Phame is integrated with other Phorge applications (like Feed,
Herald and Dashboards).
You can use Phame to write and publish posts on any topic. You might use it to
make announcements, hold discussions, or provide progress updates about a
project.
-In the upstream, we use several Phame blogs to discuss changes to Phabricator,
+In the upstream, we use several Phame blogs to discuss changes to Phorge,
make company announcements, photograph food, and provide visionary thought
leadership.
Blogs
=====
To get started with Phame, create a blog. Blogs can be personal or edited
by a group: the **Editable By** policy controls who is allowed to write new
posts.
You can provide a title, subtitle, and description to help users understand
the role and purpose of the blog.
After creating a blog, you can optionally provide a header image (a large
image shown on the main blog page, like a beautiful photograph of food) and
a picture (a small logo or profile image shown in various places in the UI to
help identify the blog).
Blogs can also be hosted externally. See "External Blogs", below, for
more information.
Posts
=====
After creating a blog, you're ready to write your first post. You can navigate
to the blog and choose {nav Write Post} to get started.
Posts have a **Visibility** field which controls who can see them. The options
are:
- **Published**: Anyone who can see the blog will be able to read the post.
- **Draft**: Allows you to work on posts before publishing them. Only users
who can edit the blog will be able to see the post.
- **Archived**: Allows you to remove old posts. Only users who can edit
the blog will be able to see the post, and it won't appear in the pending
drafts list.
After publishing a post, it will appear on the blog and on the Phame home page
for all users who can see it.
Using Phame With Other Applications
===================================
-Phame integrates with other Phabricator applications, so you can do a few
+Phame integrates with other Phorge applications, so you can do a few
interesting things:
**Dashboards**: You can create a dashboard panel which shows posts on a
particular blog, then put the panel on the homepage or a custom dashboard.
This is an easy way to create a list of recent announcements.
**Herald**: You can use Herald rules to make sure you get notified whenever
your favorite author publishes a new post.
**Remarkup**: You can reference a blog post in any other application using the
`J123` monogram for the post, or embed a more detailed link with `{J123}`.
(We ran out of letters a while ago, but thinking about **j**ournal may be
helpful in remembering this.)
External Blogs
==============
WARNING: This feature is still a prototype and has some known issues.
You can host a Phame blog on an external domain, like `blog.mycompany.com`. The
Phacility corporate blog is an example of an external Phame blog:
> https://blog.phacility.com/
External blogs are public (they do not require login) and are only supported if
-your Phabricator install is also public. You can make an install public by
+your Phorge install is also public. You can make an install public by
adjusting `policy.allow-public` in Config, but make sure you understand the
effects of adjusting this setting before touching it.
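As a sketch, this is what adjusting that option from the command line with
`bin/config` looks like (again, make sure you understand the policy
implications first):
```lang=console
# Allow logged-out users to browse public content.
phorge/ $ ./bin/config set policy.allow-public true
```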
Once you've made your install public, configure the blog that you want to host
like this:
- **View Policy**: Set the "View Policy" for the blog to "Public". Blogs must
have a public view policy to be served from an external domain.
- **Full Domain URI**: Set this to the full URI of your external domain,
- like `https://blog.mycompany.com/`. When users visit this URI, Phabricator
+ like `https://blog.mycompany.com/`. When users visit this URI, Phorge
will serve the blog to them.
To configure the blog's navigation breadcrumbs so that it links back to the
right parent site, set these options:
- **Parent Site Name**: Put the parent site name here (like "MyCompany").
- **Parent Site URI**: Put the parent site URI here (like
`https://www.mycompany.com`).
Configuring these options will add a new breadcrumb to the navigation to let
users return to the blog's parent site. It will look something like this:
- {nav My Company > Blog Name}
-Finally, configure DNS for `blog.mycompany.com` to point at Phabricator.
+Finally, configure DNS for `blog.mycompany.com` to point at Phorge.
If everything is set up properly, visiting `blog.mycompany.com` should now
serve your blog.
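The exact DNS change depends on your DNS provider. As a rough sketch (the
hostnames here are hypothetical), a record along these lines points the blog
domain at the host serving Phorge:
```lang=text
; Point the blog domain at the host that serves Phorge.
blog.mycompany.com.    3600  IN  CNAME  phorge.mycompany.com.
```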
diff --git a/src/docs/user/userguide/profile_menu.diviner b/src/docs/user/userguide/profile_menu.diviner
index 88eb32d3ff..a7e48a6924 100644
--- a/src/docs/user/userguide/profile_menu.diviner
+++ b/src/docs/user/userguide/profile_menu.diviner
@@ -1,168 +1,168 @@
@title Profile Menu User Guide
@group userguide
Master profile menus for projects and other objects.
Overview
========
Some objects, like projects, have customizable menus called "profile menus".
This guide discusses how to add, remove, reorder, configure and extend these
menus.
Supported Applications
======================
These applications currently support profile menus:
| Application | Customization | Support |
|-----|-----|-----|
| Home | Global/Personal | Full |
| Projects | Per Project | Full |
| Favorites Menu | Global/Personal | Full |
| People | None | //Read-Only// |
Editing Menus
=============
You can only edit an object's menu if you can edit the object. For example, you
must have permission to edit a project in order to reconfigure the menu for the
project.
To edit a menu, click {nav icon="cogs", name="Manage"} or {nav icon="pencil",
name="Edit ..."} in the menu itself. If you are an administrator and the
application supports Global/Personal customization, you'll have the option
of editing either the Global settings or your own Personal menu; otherwise, click
{nav icon="th-list", name="Edit Menu"}. This brings you to the menu
configuration interface which allows you to add and remove items, reorder the
menu, edit existing items, and choose a default item.
Menus are composed of a list of items. Some of the items are builtin
(for example, projects have builtin "Profile", "Workboard" and "Members"
items). You can also add custom items. Builtin and custom items mostly
behave in similar ways, but there are a few exceptions (for example, you can
not completely delete builtin items).
Adding Items
============
To add new items to a menu, use {nav icon="cog", name="Configure Menu"} and
choose a type of item to add. See below for more details on available items.
You can also find a link to this documentation in the same menu, if you want
to reference it later.
Reordering Items
================
To reorder items, drag and drop them to the desired position. Your changes
will be reflected in the item ordering in the menu.
Setting a Default
=================
The default item controls what content is shown when a user browses to the
object's main page. For example, the default item for a project controls where
the user ends up when they click a link to the project from another
application.
To choose a default item, click {nav icon="thumb-tack", name="Make Default"}.
Not all kinds of items can be set as the default item. For example, you can not
set a separator line as a default because the item can't be selected and has no
content.
If no default is explicitly selected, or a default is deleted or disabled, the
first item which is eligible to be a default is used as the default item.
Removing Items
==============
To remove items, click the {nav icon="times", name="Delete"} action.
Builtin items can not be deleted and have a
{nav icon="times", name="Disable"} action instead, which will hide them but
not delete them. You can re-enable a disabled item with the
{nav icon="plus", name="Enable"} action.
A few items can not be hidden or deleted. For example, the
{nav icon="cogs", name="Manage"} item must always be available in the menu
because if you hid it by accident there would no longer be a way to access
the configuration interface and fix the mistake.
Removing or hiding an item does not disable the underlying functionality.
For example, if you hide the "Members" item for a project, that just removes
it from the menu. The project still has members, and users can still navigate
to the members page by following a link to it from elsewhere in the application
or entering the URI manually.
Editing Items
=============
To edit an item, click the name of the item. This will show you available
configuration for the item and allow you to edit it.
Which properties are editable depends on what sort of item you are editing.
Most items can be renamed, and some items have more settings available. For
example, when editing a link, you can choose the link target and select an
icon for the item.
A few items have no configuration. For example, visual separator lines are
purely cosmetic and have no available settings.
Available Items
===============
When you add items, you can choose between different types of items to add.
Which item types are available depends on what sort of object you are editing
the menu for, but most objects support these items:
- {icon minus} **Divider**: Adds a visual separator to the menu. This is
purely cosmetic.
- {icon map-marker} **Label**: Lets you label sections of menu items.
This is also purely cosmetic.
- {icon link} **Link**: Allows you to create an item which links to
- somewhere else in Phabricator, or to an external site.
+ somewhere else in Phorge, or to an external site.
- {icon plus} **Form**: Provides quick access to custom and built-in forms
from any application that supports EditEngine.
- {icon briefcase} **Projects**: Provides quick access to a project.
- {icon globe} **Applications**: Provides quick access to your favorite
applications. Can be renamed.
- {icon tachometer} **Dashboard**: Provides quick access to your favorite
dashboard. These items display the dashboard within the navigation of the
menu you've attached them to.
To learn more about how an item works, try adding it. You can always delete
it later if it doesn't do what you wanted.
Dashboard Integration
=====================
Dashboards are directly integrated with Profile Menus. If you add a Dashboard
to a Project or to a Home menu, that Dashboard will be presented in the
context of that menu. This allows customization of different pages of content
without having the user leave Home or the Project.
To use a Dashboard to replace the default Home menu, install it as a Global
Menu Item and move it to the topmost item. By default, the first Dashboard
the menu renders will be selected as the default. Users who modify their
personal Home menu will have their topmost Dashboard as their default,
overriding the Global settings.
Writing New Item Types
======================
IMPORTANT: This feature is not stable, and the API is subject to change.
To add new types of items, subclass @{class:PhabricatorProfileMenuItem}.
diff --git a/src/docs/user/userguide/projects.diviner b/src/docs/user/userguide/projects.diviner
index 4e3ab3616f..0d9dd2aaa2 100644
--- a/src/docs/user/userguide/projects.diviner
+++ b/src/docs/user/userguide/projects.diviner
@@ -1,339 +1,339 @@
@title Projects User Guide
@group userguide
Organize users and objects with projects.
Overview
========
NOTE: This document is only partially complete.
-Phabricator projects are flexible, general-purpose groups of objects that you
+Phorge projects are flexible, general-purpose groups of objects that you
can use to organize information. Projects have some basic information like
a name and an icon, and may optionally have members.
For example, you can create projects to provide:
- **Organization**: Create a project to represent a product or initiative,
then use it to organize related work.
- **Groups**: Create a project to represent a group of people (like a team),
then add members of the group as project members.
- **Tags**: To create a tag, just create a project without any members. Then
tag anything you want.
- **Access Control Lists**: Add members to a project, then restrict the
visibility of objects to members of that project. See "Understanding
Policies" below to understand how policies and projects interact in
more detail.
Understanding Policies
======================
An important rule to understand about projects is that **adding or removing
projects to an object never affects who can see the object**.
For example, if you tag a task with a project like {nav Backend}, that does not
change who can see the task. In particular, it does not limit visibility to
only members of the "Backend" project, nor does it allow them to see it if they
otherwise could not. Likewise, removing projects does not affect visibility.
If you're familiar with other software that works differently, this may be
-unexpected, but the rule in Phabricator is simple: **adding and removing
+unexpected, but the rule in Phorge is simple: **adding and removing
projects never affects policies.**
Note that you //can// write policy rules which restrict capabilities to members
of a specific project or set of projects, but you do this by editing an
object's policies and adding rules based on project membership, not by tagging
or untagging the object with projects.
To manage who can see an object, use the object's policy controls,
Spaces (see @{article:Spaces User Guide}) and Custom Forms
(see @{article:User Guide: Customizing Forms}).
For more details about rationale, see "Policies In Depth", below.
Joining Projects
================
Once you join a project, you become a member and will receive mail sent to the
project, like a mailing list. For example, if a project is added as a
subscriber on a task or a reviewer on a revision, you will receive mail about
that task or revision.
If you'd prefer not to receive mail sent to a project, you can go to
{nav Members} and select {nav Disable Mail}. If you disable mail for a project,
you will no longer receive mail sent to the project.
Watching Projects
=================
Watching a project allows you to closely follow all activity related to a
project.
You can **watch** a project by clicking {nav Watch Project} on the project
page. To stop watching a project, click {nav Unwatch Project}.
When you watch a project, you will receive a copy of mail about any objects
(like tasks or revisions) that are tagged with the project, or that the project
is a subscriber, reviewer, or auditor for. For moderately active projects, this
may be a large volume of mail.
Edit Notifications
==================
Edit notifications are generated when project details (like the project
description, name, or icon) are updated, or when users join or leave projects.
By default, these notifications are only sent to the acting user. These
notifications are usually not very interesting, and project mail is already
complicated by members and watchers.
If you'd like to receive edit notifications for a project, you can write a
Herald rule to keep you in the loop.
Customizing Menus
=================
Projects support profile menus, which are customizable. For full details on
managing and customizing profile menus, see @{article:Profile Menu User Guide}.
Here are some examples of common ways to customize project profile menus that
may be useful:
**Link to Tasks or Repositories**: You can add a menu item for "Open Tasks" or
"Active Repositories" for a project by running the search in the appropriate
application, then adding a link to the search results to the menu.
This can let you quickly jump from a project screen to related tasks,
revisions, repositories, or other objects.
For more details on how to use search and manage queries, see
@{article:Search User Guide}.
**New Task Button**: To let users easily make a new task that is tagged with
the current project, add a link to the "New Task" form with the project
prefilled, or to a custom form with appropriate defaults.
For information on customizing and prefilling forms, see
@{article:User Guide: Customizing Forms}.
**Link to Wiki Pages**: You can add links to relevant wiki pages or other
documentation to the menu to make it easy to find and access. You could also
link to a Conpherence if you have a chatroom for a project.
**Link to External Resources**: You can link to external resources outside
-of Phabricator if you have other pages which are relevant to a project.
+of Phorge if you have other pages which are relevant to a project.
**Set Workboard as Default**: For projects that are mostly used to organize
tasks, change the default item to the workboard instead of the profile to get
to the workboard view more easily.
**Hide Unused Items**: If you have a project which you don't expect to have
members or won't have a workboard, you can hide these items to streamline the
menu.
Subprojects and Milestones
==========================
IMPORTANT: This feature is only partially implemented.
After creating a project, you can use the
{nav icon="sitemap", name="Subprojects"} menu item to add subprojects or
milestones.
**Subprojects** are projects that are contained inside the main project. You
can use them to break large or complex groups, tags, lists, or undertakings
apart into smaller pieces.
**Milestones** are a special kind of subproject for organizing tasks into
blocks of work. You can use them to implement sprints, iterations, milestones,
versions, etc.
Subprojects and milestones have some additional special behaviors and rules,
particularly around policies and membership. See below for details.
This is a brief summary of the major differences between normal projects,
subprojects, parent projects, and milestones.
| | Normal | Parent | Subproject | Milestone |
|---|---|---|---|---|
| //Members// | Yes | Union of Subprojects | Yes | Same as Parent |
| //Policies// | Yes | Yes | Affected by Parent | Same as Parent |
| //Hashtags// | Yes | Yes | Yes | Special |
Subprojects
===========
Subprojects are full-power projects that are contained inside some parent
project. You can use them to divide a large or complex project into smaller
parts.
Subprojects have normal members and normal policies, but note that the policies
of the parent project affect the policies of the subproject (see "Parent
Projects", below).
Subprojects can have their own subprojects, milestones, or both. If a
subproject has its own subprojects, it is both a subproject and a parent
project. Thus, the parent project rules apply to it, and are stronger than the
subproject rules.
Subprojects can have normal workboards.
The maximum subproject depth is 16. This limit is intended to grossly exceed
the depth necessary in normal usage.
Objects may not be tagged with multiple projects that are ancestors or
descendants of one another. For example, a task may not be tagged with both
{nav Stonework} and {nav Stonework > Masonry}.
When a project tag is added that is the ancestor or descendant of one or more
existing tags, the old tags are replaced. For example, adding
{nav Stonework > Masonry} to a task tagged with {nav Stonework} will replace
{nav Stonework} with the newer, more specific tag.
This restriction does not apply to projects which share some common ancestor
but are not themselves mutual ancestors. For example, a task may be tagged
with both {nav Stonework > Masonry} and {nav Stonework > Sculpting}.
This restriction //does// apply when the descendant is a milestone. For
example, a task may not be tagged with both {nav Stonework} and
{nav Stonework > Iteration II}.
Milestones
==========
Milestones are simple subprojects for tracking sprints, iterations, versions,
or other similar blocks of work. Milestones make it easier to create and manage
a large number of similar subprojects (for example: {nav Sprint 1},
{nav Sprint 2}, {nav Sprint 3}, etc).
Milestones can not have direct members or policies. Instead, the membership
and policies of a milestone are always the same as the milestone's parent
project. This makes large numbers of milestones more manageable when changes
occur.
Milestones can not have subprojects, and can not have their own milestones.
By default, Milestones do not have their own hashtags.
Milestones can have normal workboards.
Objects may not be tagged with two different milestones of the same parent
project. For example, a task may not be tagged with both {nav Stonework >
Iteration III} and {nav Stonework > Iteration V}.
When a milestone tag is added to an object which already has a tag from the
same series of milestones, the old tag is removed. For example, adding the
{nav Stonework > Iteration V} tag to a task which already has the
{nav Stonework > Iteration III} tag will remove the {nav Iteration III} tag.
This restriction does not apply to milestones which are not part of the same
series. For example, a task may be tagged with both
{nav Stonework > Iteration V} and {nav Heraldry > Iteration IX}.
Parent Projects
===============
When you add the first subproject to an existing project, it is converted into
a **parent project**. Parent projects have some special rules.
**No Direct Members**: Parent projects can not have members of their own.
Instead, all of the users who are members of any subproject count as members
of the parent project. By joining (or leaving) a subproject, a user is
implicitly added to (or removed from) all ancestors of that project.
Consequently, when you add the first subproject to an existing project, all of
the project's current members are moved to become members of the subproject
instead. Implicitly, they will remain members of the parent project because the
parent project is an ancestor of the new subproject.
You can edit the project afterward to change or remove members if you want to
split membership apart in a more granular way across multiple new subprojects.
**Searching**: When you search for a parent project, results for any subproject
are returned. For example, if you search for {nav Engineering}, your query will
match results in {nav Engineering} itself, but also subprojects like
{nav Engineering > Warp Drive} and {nav Engineering > Shield Batteries}.
**Policy Effects**: To view a subproject or milestone, you must be able to
view the parent project. As a result, the parent project's view policy now
affects child projects. If you restrict the visibility of the parent, you also
restrict the visibility of the children.
In contrast, permission to edit a parent project grants permission to edit
any subproject. If a user can edit {nav Root Project}, they can also edit
{nav Root Project > Child} and {nav Root Project > Child > Sprint 3}.
Policies In Depth
=================
As discussed above, adding and removing projects never affects who can see an
object. This is an explicit product design choice aimed at reducing the
complexity of policy management.
-Phabricator projects are a flexible, general-purpose, freeform tool. This is a
+Phorge projects are a flexible, general-purpose, freeform tool. This is a
good match for many organizational use cases, but a very poor match for
policies. It is important that policies be predictable and rigid, because the
cost of making a mistake with policies is high (inadvertent disclosure of
private information).
-In Phabricator, each object (like a task) can be tagged with multiple projects.
+In Phorge, each object (like a task) can be tagged with multiple projects.
This is important in a flexible organizational tool, but is a liability in a
policy tool.
If each project potentially affected visibility, it would become more difficult
to predict the visibility of objects and easier to make mistakes with policies.
There are different, reasonable expectations about how policies might be
affected when tagging objects with projects, but these expectations are in
conflict, and different users have different expectations. For example:
- if a user adds a project like {nav Backend} to a task, their intent
might be to //open// the task up and share it with the "Backend" team;
- if a user adds a project like {nav Security Vulnerability} to a task,
their intent might be to //close// the task down and restrict it to just
the security team;
- if a user adds a project like {nav Easy Starter Task} to a task, their
intent might be to not affect policies at all;
- if a user adds {nav Secret Inner Council} to a task already tagged with
{nav Security Vulnerability}, their intent might be to //open// the task
to members of //either// project, or //close// the task to just members of
//both// projects;
- if a user adds {nav Backend} to a task already tagged with
{nav Security Vulnerability}, their intent is totally unclear;
- in all cases, users may be adding projects purely to organize objects
without intending to affect policies.
We can't distinguish between these cases without adding substantial complexity,
and even if we made an attempt to navigate this it would still be very
difficult to predict the effect of tagging an object with multiple
policy-affecting projects. Users would need to learn many rules about how these
policy types interacted to predict the policy effects of adding or removing a
project.
Because of the implied complexity, we almost certainly could not prevent some
cases where a user intends to take a purely organizational action (like adding
a {nav Needs Documentation} tag) and accidentally opens a private object to a
wide audience. The policy system is intended to make these catastrophically bad
cases very difficult, and allowing projects to affect policies would make these
mistakes much easier to make.
We believe the only reasonable way we could reduce ambiguity and complexity is
by making project policy actions explicit and rule-based. But we already have a
system for explicit, rule-based management of policies: the policy system. The
policy tools are designed for policy management and aimed at making actions
explicit and mistakes very difficult.
Many of the use cases where project-based access control seems like it might be
a good fit can be satisfied with Spaces instead (see @{article:Spaces User
Guide}). Spaces are explicit, unambiguous containers for groups of objects with
similar policies.
Form customization also provides a powerful tool for making many policy
management tasks easier (see @{article:User Guide: Customizing Forms}).
diff --git a/src/docs/user/userguide/prototypes.diviner b/src/docs/user/userguide/prototypes.diviner
index c84bad1178..52ae25bf75 100644
--- a/src/docs/user/userguide/prototypes.diviner
+++ b/src/docs/user/userguide/prototypes.diviner
@@ -1,32 +1,32 @@
@title User Guide: Prototype Applications
@group userguide
Information about prototypes.
Overview
========
-Phabricator includes //prototype applications//, which are applications in an
+Phorge includes //prototype applications//, which are applications in an
early stage of development.
When we begin working on a new application, we usually implement it as a
prototype first. This allows us to get a better sense of how the application
might work and integrate with other applications, and what technical and product
challenges it might face.
Prototypes are often not generally usable. They usually aren't documented,
don't have safety and correctness checks in place, very rarely have full
integrations or APIs, and may be missing major pieces of critical functionality
or even not work at all. The parts of an application we prototype first
may not be the useful parts.
Some applications leave the prototype phase quickly, but other applications may
not. We build some prototypes just to make sure a specific technical barrier is
surmountable, and may not finish the application for a very long time. In other
cases, the prototype seems less interesting or useful once it starts working
than we might have imagined it would be, or turns out to be far more
challenging than we thought. We may lower the priority of a project or put it
on hold indefinitely if we're less excited about it after we begin building it.
If you're interested in previewing upcoming applications, you can use the
`phabricator.show-prototypes` configuration setting to enable prototypes.
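For example, a minimal sketch of enabling prototypes from the command line
with `bin/config` (the setting can also be changed in the Config application):
```lang=console
# Enable prototype applications.
phorge/ $ ./bin/config set phabricator.show-prototypes true
```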
diff --git a/src/docs/user/userguide/remarkup.diviner b/src/docs/user/userguide/remarkup.diviner
index f96c8c7b25..4fb8fc75f8 100644
--- a/src/docs/user/userguide/remarkup.diviner
+++ b/src/docs/user/userguide/remarkup.diviner
@@ -1,734 +1,734 @@
@title Remarkup Reference
@group userguide
Explains how to make bold text; this makes your words louder so you can win
arguments.
= Overview =
-Phabricator uses a lightweight markup language called "Remarkup", similar to
+Phorge uses a lightweight markup language called "Remarkup", similar to
other lightweight markup languages like Markdown and Wiki markup.
This document describes how to format text using Remarkup.
= Quick Reference =
All the syntax is explained in more detail below, but this is a quick guide to
formatting text in Remarkup.
These are inline styles, and can be applied to most text:
**bold** //italic// `monospaced` ##monospaced## ~~deleted~~ __underlined__
!!highlighted!!
D123 T123 rX123 # Link to Objects
{D123} {T123} # Link to Objects (Full Name)
{F123} # Embed Images
{M123} # Embed Pholio Mock
@username # Mention a User
#project # Mention a Project
[[wiki page]] # Link to Phriction
[[wiki page | name]] # Named link to Phriction
http://xyz/ # Link to web
[[http://xyz/ | name]] # Named link to web
[name](http://xyz/) # Alternate Link
These are block styles, and must be separated from surrounding text by
empty lines:
= Large Header =
== Smaller Header ==
## This is a Header As Well
Also a Large Header
===================
Also a Smaller Header
---------------------
> Quoted Text
Use `- ` or `* ` for bulleted lists, and `# ` for numbered lists.
Use ``` or indent two spaces for code.
Use %%% for a literal block.
Use | ... | ... for tables.
= Basic Styling =
Format **basic text styles** like this:
**bold text**
//italic text//
`monospaced text`
##monospaced text##
~~deleted text~~
__underlined text__
!!highlighted text!!
Those produce **bold text**, //italic text//, `monospaced text`, ##monospaced
text##, ~~deleted text~~, __underlined text__, and !!highlighted text!!
respectively.
= Layout =
Make **headers** like this:
= Large Header =
== Smaller Header ==
===== Very Small Header =====
Alternate Large Header
======================
Alternate Smaller Header
------------------------
You can optionally omit the trailing `=` signs -- that is, these are the same:
== Smaller Header ==
== Smaller Header
This produces headers like the ones in this document. Make sure you have an
empty line before and after the header.
Lists
=====
Make **lists** by beginning each item with a `-` or a `*`:
lang=text
- milk
- eggs
- bread
* duck
* duck
* goose
This produces a list like this:
- milk
- eggs
- bread
(Note that you need to put a space after the `-` or `*`.)
You can make numbered lists with a `#` instead of `-` or `*`:
# Articuno
# Zapdos
# Moltres
Numbered lists can also be started with `1.` or `1)`. If you use a number other
than `1`, the list will start at that number instead. For example, this:
```
200) OK
201) Created
202) Accepted
```
...produces this:
200) OK
201) Created
202) Accepted
You can also nest lists:
```- Body
- Head
- Arm
- Elbow
- Hand
# Thumb
# Index
# Middle
# Ring
# Pinkie
- Leg
- Knee
- Foot```
...which produces:
- Body
- Head
- Arm
- Elbow
- Hand
# Thumb
# Index
# Middle
# Ring
# Pinkie
- Leg
- Knee
- Foot
If you prefer, you can indent lists using multiple characters to show indent
depth, like this:
```- Tree
-- Branch
--- Twig```
As expected, this produces:
- Tree
-- Branch
--- Twig
You can add checkboxes to items by prefacing them with `[ ]` or `[X]`, like
this:
```
- [X] Preheat oven to 450 degrees.
- [ ] Zest 35 lemons.
```
When rendered, this produces:
- [X] Preheat oven to 450 degrees.
- [ ] Zest 35 lemons.
Make **code blocks** by indenting two spaces:
f(x, y);
You can also use three backticks to enclose the code block:
```f(x, y);
g(f);```
You can specify a language for syntax highlighting with `lang=xxx`:
lang=text
lang=html
<a href="#">...</a>
This will highlight the block using a highlighter for that language, if one is
available (in most cases, this means you need to configure Pygments):
lang=html
<a href="#">...</a>
You can also use a `COUNTEREXAMPLE` header to show that a block of code is
bad and shouldn't be copied:
lang=text
COUNTEREXAMPLE
function f() {
global $$variable_variable;
}
This produces a block like this:
COUNTEREXAMPLE
function f() {
global $$variable_variable;
}
You can use `lines=N` to limit the vertical size of a chunk of code, and
`name=some_name.ext` to give it a name. For example, this:
lang=text
lang=html, name=example.html, lines=12, counterexample
...
...produces this:
lang=html, name=example.html, lines=12, counterexample
<p>Apple</p>
<p>Apricot</p>
<p>Avocado</p>
<p>Banana</p>
<p>Bilberry</p>
<p>Blackberry</p>
<p>Blackcurrant</p>
<p>Blueberry</p>
<p>Currant</p>
<p>Cherry</p>
<p>Cherimoya</p>
<p>Clementine</p>
<p>Date</p>
<p>Damson</p>
<p>Durian</p>
<p>Eggplant</p>
<p>Elderberry</p>
<p>Feijoa</p>
<p>Gooseberry</p>
<p>Grape</p>
<p>Grapefruit</p>
<p>Guava</p>
<p>Huckleberry</p>
<p>Jackfruit</p>
<p>Jambul</p>
<p>Kiwi fruit</p>
<p>Kumquat</p>
<p>Legume</p>
<p>Lemon</p>
<p>Lime</p>
<p>Lychee</p>
<p>Mandarine</p>
<p>Mango</p>
<p>Mangostine</p>
<p>Melon</p>
You can use the `NOTE:`, `WARNING:` or `IMPORTANT:` elements to call attention
to an important idea.
For example, write this:
```
NOTE: Best practices in proton pack operation include not crossing the streams.
```
...to produce this:
NOTE: Best practices in proton pack operation include not crossing the streams.
Using `WARNING:` or `IMPORTANT:` at the beginning of the line changes the
color of the callout:
WARNING: Crossing the streams can result in total protonic reversal!
IMPORTANT: Don't cross the streams!
In addition, you can use `(NOTE)`, `(WARNING)`, or `(IMPORTANT)` to get the
same effect but without `(NOTE)`, `(WARNING)`, or `(IMPORTANT)` appearing in
the rendered result. For example, this callout uses `(NOTE)`:
(NOTE) Dr. Egon Spengler is the best resource for additional proton pack
questions.
Dividers
========
You can divide sections by putting three or more dashes on a line by
themselves. This creates a divider or horizontal rule similar to an `<hr />`
tag, like this one:
---
The dashes need to appear on their own line and be separated from other
content. For example, like this:
```
This section will be visually separated.
---
On an entirely different topic, ...
```
= Linking URIs =
-URIs are automatically linked: http://phabricator.org/
+URIs are automatically linked: http://phorge.it/
If you have a URI with problematic characters in it, like
"`http://comma.org/,`", you can surround it with angle brackets:
<http://comma.org/,>
This will force the parser to consume the whole URI: <http://comma.org/,>
You can also create named links, where you choose the displayed text. These
-work within Phabricator or on the internet at large:
+work within Phorge or on the internet at large:
[[/herald/transcript/ | Herald Transcripts]]
[[http://www.boring-legal-documents.com/ | exciting legal documents]]
Markdown-style links are also supported:
[Toil](http://www.trouble.com)
= Linking to Objects =
-You can link to Phabricator objects, such as Differential revisions, Diffusion
+You can link to Phorge objects, such as Differential revisions, Diffusion
commits and Maniphest tasks, by mentioning the name of an object:
D123 # Link to Differential revision D123
rX123 # Link to SVN commit 123 from the "X" repository
rXaf3192cd5 # Link to Git commit "af3192cd5..." from the "X" repository.
# You must specify at least 7 characters of the hash.
T123 # Link to Maniphest task T123
You can also link directly to a comment in Maniphest and Differential (these
can be found on the date stamp of any transaction/comment):
T123#412 # Link to comment id #412 of task T123
-See the Phabricator configuration setting `remarkup.ignored-object-names` to
+See the Phorge configuration setting `remarkup.ignored-object-names` to
modify this behavior.
= Embedding Objects
You can also generate full-name references to some objects by using braces:
{D123} # Link to Differential revision D123 with the full name
{T123} # Link to Maniphest task T123 with the full name
These references will also show when an object changes state (for instance, a
task or revision is closed). Some types of objects support rich embedding.
== Linking to Project Tags
Projects can be linked to with the use of a hashtag `#`. This works by default
using the name of the Project (lowercase, underscored). Additionally you
can set multiple additional hashtags by editing the Project details.
#qa, #quality_assurance
== Embedding Mocks (Pholio)
You can embed a Pholio mock by using braces to refer to it:
{M123}
By default the first four images from the mock set are displayed. This behavior
can be overridden with the **image** option. With the **image** option you can
provide one or more image IDs to display.
You can set the image (or images) to display like this:
{M123, image=12345}
{M123, image=12345 & 6789}
== Embedding Pastes
You can embed a Paste using braces:
{P123}
You can adjust the embed height with the `lines` option:
{P123, lines=15}
You can highlight specific lines with the `highlight` option:
{P123, highlight=15}
{P123, highlight="23-25, 31"}
== Embedding Images
You can embed an image or other file by using braces to refer to it:
{F123}
In most interfaces, you can drag-and-drop an image from your computer into the
text area to upload and reference it.
Some browsers (e.g. Chrome) support uploading image data just by pasting it
from the clipboard into the text area.
You can set file display options like this:
{F123, layout=left, float, size=full, alt="a duckling"}
Valid options for all files are:
- **layout** left (default), center, right, inline, link (render a link
instead of a thumbnail for images)
- **name** with `layout=link` or for non-images, use this name for the link
text
- **alt** Provide alternate text for assistive technologies.
Image files support these options:
- **float** If layout is set to left or right, the image will be floated so
text wraps around it.
- **size** thumb (default), full
- **width** Scale image to a specific width.
- **height** Scale image to a specific height.
Audio and video files support these options:
- **media**: Specify the media type as `audio` or `video`. This allows you to
  disambiguate how a file format which may contain either audio or video
  should be rendered (see the example after this list).
- **loop**: Loop this media.
- **autoplay**: Automatically begin playing this media.
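For example, a sketch of embedding file `F123` as looping, autoplaying audio,
assuming these options use the same syntax as the image options shown above:

{F123, media=audio, loop, autoplay}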
== Embedding Countdowns
You can embed a countdown by using braces:
{C123}
= Quoting Text =
To quote text, preface it with a `>`:
> This is quoted text.
This appears like this:
> This is quoted text.
= Embedding Media =
If you set a configuration flag, you can embed media directly in text:
- **remarkup.enable-embedded-youtube**: allows you to paste in YouTube videos
and have them render inline.
This option is disabled by default because it has security and/or
silliness implications. Carefully read the description before enabling it.
= Image Macros =
You can upload image macros (More Stuff -> Macro) which will replace text
strings with the image you specify. For instance, you could upload an image of a
dancing banana to create a macro named "peanutbutterjellytime", and then any
time you type that string on a separate line it will be replaced with the image
of a dancing banana.
= Memes =
You can also use image macros in the context of memes. For example, if you
have an image macro named `grumpy`, you can create a meme by doing the
following:
{meme, src = grumpy, above = toptextgoeshere, below = bottomtextgoeshere}
By default, the font used to create the text for the meme is `tuffy.ttf`. For
the more authentic feel of `impact.ttf`, you simply have to place the Impact
-TrueType font in the Phabricator subfolder `/resources/font/`. If Remarkup
+TrueType font in the Phorge subfolder `/resources/font/`. If Remarkup
detects the presence of `impact.ttf`, it will automatically use it.
= Mentioning Users =
In Differential and Maniphest, you can mention another user by writing:
@username
When you submit your comment, this will add them as a CC on the revision or task
if they aren't already CC'd.
Icons
=====
You can add icons to comments using the `{icon ...}` syntax. For example:
{icon camera}
This renders: {icon camera}
You can select a color for icons:
{icon camera color=blue}
This renders: {icon camera color=blue}
For a list of available icons and colors, check the UIExamples application.
(The icons are sourced from
[[ https://fontawesome.com/v4.7.0/icons/ | FontAwesome ]], so you can also
browse the collection there.)
You can add `spin` to make the icon spin:
{icon cog spin}
This renders: {icon cog spin}
= Phriction Documents =
You can link to Phriction documents with a name or path:
Make sure you sign and date your [[legal/Letter of Marque and Reprisal]]!
By default, the link will render with the document title as the link name.
With a pipe (`|`), you can retitle the link. Use this to mislead your
opponents:
Check out these [[legal/boring_documents/ | exciting legal documents]]!
Links to pages which do not exist are shown in red. Links to pages which exist
but which the viewer does not have permission to see are shown with a lock
icon, and the link will not disclose the page title.
If you begin a link path with `./` or `../`, the remainder of the path will be
evaluated relative to the current wiki page. For example, if you are writing
content for the document `fruit/`, a link to `[[./guava]]` is the same as a link
to `[[fruit/guava]]` from elsewhere.
Relative links may use `../` to traverse up the document tree. From the
`produce/vegetables/` page, you can use `[[../fruit/guava]]` to link to the
`produce/fruit/guava` page.
Relative links do not work when used outside of wiki pages. For example,
you can't use a relative link in a comment on a task, because there is no
reasonable place for the link to start resolving from.
When documents are moved, relative links are not automatically updated: they
are preserved as currently written. After moving a document, you may need to
review and adjust any relative links it contains.
= Literal Blocks =
To place text in a literal block use `%%%`:
%%%Text that won't be processed by remarkup
[[http://www.example.com | example]]
%%%
Remarkup will not process the text inside of literal blocks (other than to
escape HTML and preserve line breaks).
= Tables =
Remarkup supports simple table syntax. For example, this:
```
| Fruit | Color | Price | Peel?
| ----- | ----- | ----- | -----
| Apple | red | `$0.93` | no
| Banana | yellow | `$0.19` | **YES**
```
...produces this:
| Fruit | Color | Price | Peel?
| ----- | ----- | ----- | -----
| Apple | red | `$0.93` | no
| Banana | yellow | `$0.19` | **YES**
Remarkup also supports a simplified HTML table syntax. For example, this:
```
<table>
<tr>
<th>Fruit</th>
<th>Color</th>
<th>Price</th>
<th>Peel?</th>
</tr>
<tr>
<td>Apple</td>
<td>red</td>
<td>`$0.93`</td>
<td>no</td>
</tr>
<tr>
<td>Banana</td>
<td>yellow</td>
<td>`$0.19`</td>
<td>**YES**</td>
</tr>
</table>
```
...produces this:
<table>
<tr>
<th>Fruit</th>
<th>Color</th>
<th>Price</th>
<th>Peel?</th>
</tr>
<tr>
<td>Apple</td>
<td>red</td>
<td>`$0.93`</td>
<td>no</td>
</tr>
<tr>
<td>Banana</td>
<td>yellow</td>
<td>`$0.19`</td>
<td>**YES**</td>
</tr>
</table>
Some general notes about this syntax:
- your tags must all be properly balanced;
- your tags must NOT include attributes (`<td>` is OK, `<td style="...">` is
not);
- you can use other Remarkup rules (like **bold**, //italics//, etc.) inside
table cells.
Navigation Sequences
====================
You can use `{nav ...}` to render a stylized navigation sequence when helping
someone to locate something. This can be useful when writing documentation.
For example, you could give someone directions to purchase lemons:
{nav icon=home, name=Home >
Grocery Store >
Produce Section >
icon=lemon-o, name=Lemons}
To render this example, use this markup:
```
{nav icon=home, name=Home >
Grocery Store >
Produce Section >
icon=lemon-o, name=Lemons}
```
In general:
- Separate sections with `>`.
- Each section can just have a name to add an element to the navigation
sequence, or a list of key-value pairs.
- Supported keys are `icon`, `name`, `type` and `href`.
- The `type` option can be set to `instructions` to indicate that an element
is asking the user to make a choice or follow specific instructions.
Keystrokes
==========
You can use `{key ...}` to render a stylized keystroke. For example, this:
```
Press {key M} to view the starmap.
```
...renders this:
> Press {key M} to view the starmap.
You can also render sequences with modifier keys. This:
```
Use {key command option shift 3} to take a screenshot.
Press {key down down-right right LP} to activate the hadoken technique.
```
...renders this:
> Use {key command option shift 3} to take a screenshot.
> Press {key down down-right right LP} to activate the hadoken technique.
Anchors
========
You can use `{anchor #xyz}` to create a document anchor and later link to
it directly with `#xyz` in the URI.
Headers also automatically create named anchors.
If you navigate to `#xyz` in your browser location bar, the page will scroll
to the first anchor with "xyz" as a prefix of the anchor name.
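For example, a minimal sketch (the anchor name here is arbitrary):

```
{anchor #proton-packs}
```

Navigating to `#proton-packs` in your browser's location bar will then scroll
to this point in the document.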
= Fullscreen Mode =
Remarkup editors provide a fullscreen composition mode. This can make it easier
to edit large blocks of text, or improve focus by removing distractions. You can
exit **Fullscreen** mode by clicking the button again or by pressing escape.
diff --git a/src/docs/user/userguide/reviews_vs_audit.diviner b/src/docs/user/userguide/reviews_vs_audit.diviner
index 642683840c..bf6afb64b4 100644
--- a/src/docs/user/userguide/reviews_vs_audit.diviner
+++ b/src/docs/user/userguide/reviews_vs_audit.diviner
@@ -1,143 +1,143 @@
@title User Guide: Review vs Audit
@group userguide
Discusses the differences between "review" and "audit" workflows.
Overview
========
-Phabricator supports two similar but separate code review workflows: "review"
+Phorge supports two similar but separate code review workflows: "review"
and "audit".
Review occurs in **Differential**, before changes are published. You can learn
more in @{article:Differential User Guide}.
Audit occurs in **Diffusion**, after changes are published. You can learn more
in @{article:Audit User Guide}.
When this documentation discusses "unpublished changes", it refers to changes
which are still subject to being reworked in response to feedback. In many
workflows, these changes will only exist locally on the developer's machine,
but some workflows push tentative or temporary changes into remotes. The step
that "publishes" changes might be either pushing or merging them, depending on
your workflow.
Both the audit and review workflows are lightweight, asynchronous web-based
workflows where reviewers or auditors inspect code independently, from their
own machines -- not synchronous review sessions where authors and reviewers
meet in person to discuss changes.
Broadly, review is normally a //blocking// workflow: in review workflows,
authors usually can not publish changes until review completes and reviewers
are satisfied.
In contrast, audit is normally a //nonblocking// workflow: in audit workflows,
changes usually move forward by default.
Advantages of Review
====================
Pre-publish review is significantly more powerful than post-publish auditing.
You gain these advantages by requiring review //before// changes may be
published:
- Authors have a strong incentive to craft small, well-formed changes that
will be readily understood, to explain them adequately, and to provide
appropriate test plans, test coverage and context.
- Reviewers have a real opportunity to make significant suggestions about
architecture or approach in review. These suggestions are less attractive
to adopt from audit, and may be much more difficult to adopt if significant
time has passed between publish and audit.
- Authors have a strong incentive to fix problems and respond to feedback
received during review because it blocks them. Authors have a much weaker
incentive to promptly address problems raised during audit.
- Authors can ask reviewers to apply and verify fixes before they are
published.
- Authors can easily pursue feedback early, and get course corrections on
approach or direction.
- Reviewers are better prepared to support a given change once it is in
production, having already had a chance to become familiar with and reason
through the code.
- Reviewers are able to catch problems which automated tests may have
difficulty detecting. For example, human reviewers are able to reason about
performance problems that tests can easily miss because they run on
small datasets and stub out service calls.
- Communicating about changes //before// they happen generally leads to better
preparation for their effects.
The theoretical cost of review is that it slows down development by introducing
a blocking step into the process and generally wastes developer time that could
be better spent developing. This is less true than it appears, because the costs
are low and pay for themselves in other ways:
- Differential is fast and provides a lightweight process for submitting
code for review and for performing review.
- Authors are free to pursue other changes while code is being reviewed. With
appropriate change management (like local branching in Git) they can even
pursue dependent changes easily. Authors should rarely if ever be blocked on
review, even though an individual change is blocked until it is approved.
- The workflow as a whole is lightweight and, with skillful reviewers,
effective at identifying bugs. It is generally faster to fix bugs in review
than in production.
- More importantly, it is effective at identifying problems with architecture
and approach. These are free to fix in review ("don't do this, it is a bad
idea") and may be very time consuming to fix in production. No matter how
good your test suite is, it can't identify solutions which are poor because
of missing context, or miscommunication, or which are simply bad ideas.
- Changes which are too large or too complicated to be reviewed quickly are
often //too large and too complicated, period//. Nearly all large changes
can be split apart into small, independent pieces which are easier to
understand and test. Review tends to encourage smaller and better-factored
changes.
- Review can be integrated with static analysis which can detect (and,
in many cases, correct) mechanical problems with code like syntax,
formatting, naming conventions, style problems, misspellings, and some
program errors. This reduces the amount of time it takes to review code,
and means reviewers can focus on actual problems with the code rather than
minor stylistic issues.
- Review creates a permanent record of context and intent which explains why
a change was made, generally with much more information than commit messages
alone (authors have an incentive to properly explain a change when sending
it for review). This makes it easier to understand code later, and to
respond appropriately when it breaks.
- With `arc patch`, it is roughly as easy to pull a change out of Differential
as it is to pull it out of the remote.
Advantages of Audit
===================
Post-publish audit is a less powerful workflow than pre-publish review, but can
supplement review and is better than nothing on its own. If you are unpersuaded
by the arguments above (or work on a team that is unswayed), audits provide
some of the benefits of review with less friction:
- - Audits are driven entirely by Phabricator: users do not need to install
+ - Audits are driven entirely by Phorge: users do not need to install
`arc`.
- Audits require little adjustment to existing workflows and little training.
- Audits are completely nonblocking, and send fewer notifications than review.
- Even if you have review, audits can be useful as a supplement to keep tabs
on lower-importance changes or raise issues that are discovered after
review.
Recommendations
===============
Here are super biased recommendations from developers of code review software:
- If you can do review, do it. Supplement it with audits for less important
changes as your organization scales.
- If you can't do review immediately, set up audits and try to transition
toward review. Some types of changes (like tentative changes or requests
for feedback about code) are a naturally good fit for review and can serve
as a stepping stone toward broader acceptance. Greater familiarity with the
toolset may also foster more acceptance toward review, and the value of
review may become more obvious as the organization scales (e.g., once you
get interns).
- If you aren't interested in review, just do audits. You can always
change your mind later. But consider review! It's really good, we promise!
Next Steps
==========
- Learn more about reviews in @{article:Differential User Guide}; or
- learn more about audits in @{article:Audit User Guide}.
diff --git a/src/docs/user/userguide/search.diviner b/src/docs/user/userguide/search.diviner
index 9276298f31..b10e4c147c 100644
--- a/src/docs/user/userguide/search.diviner
+++ b/src/docs/user/userguide/search.diviner
@@ -1,193 +1,193 @@
@title Search User Guide
@group userguide
-Introduction to searching for documents in Phabricator.
+Introduction to searching for documents in Phorge.
Overview
========
-Phabricator has two major ways to search for documents and objects (like tasks,
+Phorge has two major ways to search for documents and objects (like tasks,
code reviews, users, wiki documents, and so on): **global search** and
**application search**.
**Global search** allows you to search across multiple document types at once,
but has fewer options for refining a search. It's a good general-purpose
search, and helpful if you're searching for a text string.
**Application search** allows you to search within an application (like
Maniphest) for documents of a specific type. Because application search is only
searching one type of object, it can provide more powerful options for
filtering, ordering, and displaying the results.
Both types of search share many of the same features. This document walks
through how to use search and how to take advantage of some of the advanced
options.
Global Search
=============
Global search allows you to search across multiple document types at once.
You can access global search by entering a search query in the main menu bar.
By default, global search queries search all document types: for example, they
will find matching tasks, commits, wiki documents, users, etc. You can use the
dropdown to the left of the search box to select a different search scope.
-If you choose the **Current Application** scope, Phabricator will search for
+If you choose the **Current Application** scope, Phorge will search for
open documents in the current application. For example, if you're in Maniphest
and run a search, you'll get matching tasks. If you're in Phriction and run a
search, you'll get matching wiki documents.
Some pages (like the 404 page) don't belong to an application, or belong to an
application which doesn't have any searchable documents. In these cases,
-Phabricator will search all documents.
+Phorge will search all documents.
To quickly **jump to an object** like a task, enter the object's ID in the
global search box and search for it. For example, you can enter `T123` or
`D456` to quickly jump to the corresponding task or code review, or enter a Git
commit hash to jump to the corresponding commit. For a complete list of
supported commands, see @{article:Search User Guide: Shortcuts}.
After running a search, you can scroll up to add filters and refine the result
set. You can also select **Advanced Search** from the dropdown menu to jump
here immediately, or press return in the search box without entering a query.
-This interface supports standard Phabricator search and filtering features,
+This interface supports standard Phorge search and filtering features,
like **saved queries** and **typeaheads**. See below for more details on using
these features.
Application Search
==================
Application search gives you a more powerful way to search one type of document,
like tasks. Most applications provide application search interfaces for the
documents or objects they let you create: these pages have queries in the left
menu, show objects or documents in the main content area, and have controls
for refining the results.
These interfaces support **saved queries** and **typeaheads**.
Saving and Sharing Queries
=============
If you have a query which you run often, you can save it for easy access.
To do this, click "Save Custom Query..." on the result screen. Choose a name
for your query and it will be saved in the left nav so you can run it again
with one click.
You can use "Edit Queries..." to reorder queries or remove saved queries you
don't use anymore.
If you drag a query to the top of the list, it will execute by default when
you load the relevant search interface. You can use this to make your default
view show the results you most often want.
You can share queries with other users by sending them the URL. This will run
the same query for them with all the parameters you've set (they may see
different results than you do, because they may not have the same permissions).
Typeaheads
==========
Typeaheads are text inputs which suggest options as you type. Typeaheads make
it easy to select users, projects, document types, and other kinds of objects
without typing their full names.
For example, if you want to find tasks that a specific user created, you can
use the "Authors:" filter in Maniphest. The filter uses a typeahead control
to let you enter authors who you want to search for.
To use a typeahead, enter the first few letters of the thing you want to
select. It will appear in a dropdown under your cursor, and you can select it
by clicking it (or using the arrow keys to highlight it, then pressing return).
If you aren't sure about the exact name of what you're looking for, click the
browse button ({nav icon=search}) to the right of the input. This will let you
browse through valid results for the control. You can filter the results from
within the browse dialog to narrow them down.
Some typeaheads support advanced selection functions which can let you build
more powerful queries. If a control supports functions, the "Browse" dialog
will show that advanced functions are available and give you a link to details
on which functions you can use.
For example, the `members()` function lets you automatically select all of the
members of a project. You could use this with the "Authors" filter to find
tasks created by anyone on a certain team.
Another useful function is the `viewer()` function, which works as though you'd
typed your own username when you run the query. However, if you send the query
to someone else, it will show results for //their// username when they run it.
This can be particularly useful when creating dashboard panels.
Fulltext Search
===============
Global search and some applications provide **fulltext search**. In
applications, this is a field called {nav Query}.
Fulltext search allows you to search the text content of objects and supports
some special syntax. These features are supported:
- Substring search with `~platypus`.
- Field search with `title:platypus`.
- Filtering out matches with `-platypus`.
- Quoted terms with `"platypus attorney"`.
- Matching entire fields with `=platypus`.
- Combining features with `title:~"platypus attorney"`.
- Testing a field for presence (`title:~`) or absence (`title:-`).
See below for more detail.
**Substrings**: Normally, query terms are searched for as words, so searching
for `read` won't find documents which only contain the word `threaded`, even
though "read" is a substring of "threaded". With the substring operator, `~`,
you can search for substrings instead: the query `~read` will match documents
which contain that text anywhere, even in the middle of a word.
**Quoted Terms**: When you search for multiple terms, documents which match
each term will be returned, even if the terms are not adjacent in the document.
For example, the query `void star` will match a document titled `A star in the
void`, because it matches both `void` and `star`. To search for an exact
sequence of terms, quote them: `"void star"`. This query will only match
documents which use those terms as written.
**Stemming**: Searching for a term like `rearming` will find documents which
contain variations of the word, like `rearm`, `rearms`, and `rearmed`. To
search for an exact word, quote the term: `"rearming"`.
**Field Search**: By default, query terms are searched for in the title, body,
and comments. If you only want to search for a term in titles, use `title:`.
For example, `title:platypus` only finds documents with that term in the
title. This can be combined with other operators, for example `title:~platypus`
or `title:"platypus attorney"`. These scopes are also supported:
- `title:...` searches titles.
- `body:...` searches bodies (descriptions or summaries).
- `core:...` searches titles and bodies, but not comments.
- `comment:...` searches only comments.
**Filtering Matches**: You can remove documents which match certain terms from
the result set with `-`. For example: `platypus -mammal`. Documents which match
negated terms will be filtered out of the result set.
**Matching Entire Fields**: If you know the exact name of an object and want
to find only that object, you can use the `=` operator. A query like
`title:"warp drive"` will find a document titled "Warp Drive", but will also
find documents with longer titles, like "Not a Warp Drive". The `=` operator
requires that the entire field match the query exactly, so //only// documents
exactly titled "Warp Drive" will be matched by the query (but note that the
query is still case insensitive).
**Present and Absent Fields**: To find objects with //any// value in a
particular field, use `field:~` as a search term (with no additional text). For
example, searching Maniphest for `comment:~` will find tasks with any comments.
If you want to find objects that are //missing// a particular field, use
`field:-` with no additional argument. For example, searching Maniphest for
`body:-` will find tasks with no description.
diff --git a/src/docs/user/userguide/spaces.diviner b/src/docs/user/userguide/spaces.diviner
index 4a748fb25b..a2809cbe3e 100644
--- a/src/docs/user/userguide/spaces.diviner
+++ b/src/docs/user/userguide/spaces.diviner
@@ -1,167 +1,167 @@
@title Spaces User Guide
@group userguide
Guide to the Spaces application.
Overview
========
The Spaces application makes it easier to manage large groups of objects which
share the same access policy. For example:
- An organization might make a space for a project in order to satisfy a
contractual obligation to limit access, even internally.
- An open source organization might make a space for work related to
internal governance, to separate private and public discussions.
- A contracting company might make spaces for clients, to separate them from
one another.
- A company might create a space for consultants, to give them limited
access to only the resources they need to do their work.
- An ambitious manager might create a space to hide her team's work from her
enemies at the company, that she might use the element of surprise to later
expand her domain.
-Phabricator's access control policies are generally powerful enough to handle
+Phorge's access control policies are generally powerful enough to handle
these use cases on their own, but applying the same policy to a large group
of objects requires a lot of effort and is error-prone.
Spaces build on top of policies and make it easier and more reliable to
configure, review, and manage groups of objects with similar policies.
Creating Spaces
=================
Spaces are optional, and are inactive by default. You don't need to configure
them if you don't plan to use them. You can always set them up later.
To activate Spaces, you need to create at least two spaces. Create spaces from
the web UI, by navigating to {nav Spaces > Create Space}. By default, only
administrators can create new spaces, but you can configure this in the
{nav Applications} application.
The first space you create will be a special "default" space, and all existing
objects will be shifted into this space as soon as you create it. Spaces you
create later will be normal spaces, and begin with no objects inside them.
Create the first space (you may want to name it something like "Default" or
"Global" or "Public", depending on the nature of your organization), then
create a second space. Usually, the second space will be something like
"Secret Plans" and have a more restrictive "Visible To" policy.
Using Spaces
============
Once you've created at least two spaces, you can begin using them.
Application UIs will change for users who can see at least two spaces, opening
up new controls which let them work with spaces. They will now be able to
choose which space to create new objects into, be able to move objects between
spaces, and be able to search for objects in a specific space or set of spaces.
In list and detail views, objects will show which space they're in if they're
in a non-default space.
Users with access to only one space won't see these controls, even if many
spaces exist. This simplifies the UI for users with limited access.
Space Policies
==============
Briefly, spaces affect policies like this:
- Spaces apply their view policy to all objects inside the space.
- Space policies are absolute, and stronger than all other policies. A
user who can not see a space can **never** see objects inside the space.
- Normal policies are still checked: spaces can only reduce access.
When you create a space, you choose a view policy for that space by using the
**Visible To** control. This policy controls both who can see the space, and
who can see objects inside the space.
Spaces apply their view policy to all objects inside the space: if you can't
see a space, you can never see objects inside it. This policy check is absolute
and stronger than all other policy rules, including policy exceptions.
For example, a user can never see a task in a space they can't see, even if
they are an admin and the author and owner of the task, and subscribed to the
task and the view and edit policies are set to "All Users", and they created
the space originally and the moon is full and they are pure of heart and
possessed of the noblest purpose. Spaces are impenetrable.
Even if a user satisfies the view policy for a space, they must still pass the
view policy on the object: the space check is a new check in addition to any
check on the object, and can only limit access.
The edit policy for a space only affects the space itself, and is not applied
to objects inside the space.
Archiving Spaces
================
If you no longer need a space, you can archive it by choosing
{nav Archive Space} from the detail view. This hides the space and all the
objects in it without deleting any data.
New objects can't be created into archived spaces, and existing objects can't
be shifted into archived spaces. The UI won't give you options to choose
these spaces when creating or editing objects.
Additionally, objects (like tasks) in archived spaces won't be shown in most
search result lists by default. If you need to find objects in an archived
space, use the `Spaces` constraint to specifically search for objects in that
space.
You can reactivate a space later by choosing {nav Activate Space}.
Application Email
=================
After activating spaces, you can choose a space when configuring inbound email
addresses in {nav Applications}.
Spaces affect policies for application email just like they do for other
objects: to see or use the address, you must be able to see the space which
contains it.
Objects created from inbound email will be created in the space the email is
associated with.
Limitations and Caveats
=======================
Some information is shared between spaces, so they do not completely isolate
users from other activity on the install. This section discusses limitations
of the isolation model. Most of these limitations are intrinsic to the policy
-model Phabricator uses.
+model Phorge uses.
**Shared IDs**: Spaces do not have unique object IDs: there is only one `T1`,
not a separate one in each space. It can be moved between spaces, but `T1`
always refers to the same object. In most cases, this makes working with
spaces simpler and easier.
However, because IDs are shared, users in any space can look at object IDs to
determine how many objects exist in other spaces, even if they can't see those
objects. If a user creates a new task and sees that it is `T5000`, they can
know that there are 4,999 other tasks they don't have permission to see.
**Globally Unique Values**: Some values (like usernames, email addresses,
project hashtags, repository callsigns, and application emails) must be
globally unique.
As with normal policies, users may be able to determine that a `#yolo` project
exists, even if they can't see it: they can try to create a project using the
`#yolo` hashtag, and will receive an error if it is a duplicate.
**User Accounts**: Spaces do not apply to users, and can not hide the existence
of user accounts.
For example, if you are a contracting company and have Coke and Pepsi as
clients, the CEO of Coke and the CEO of Pepsi will each be able to see that the
other has an account on the install, even if all the work you are doing for
them is separated into "Coke" and "Pepsi" spaces.
diff --git a/src/docs/user/userguide/tone.diviner b/src/docs/user/userguide/tone.diviner
index 553fe2851b..6398917d2c 100644
--- a/src/docs/user/userguide/tone.diviner
+++ b/src/docs/user/userguide/tone.diviner
@@ -1,52 +1,52 @@
@title User Guide: Project Tone
@group userguide
-Explains why Phabricator uses a lighthearted tone.
+Explains why Phorge uses a lighthearted tone.
Overview
========
-Phabricator uses a lighthearted tone in documentation and some interfaces, and
+Phorge uses a lighthearted tone in documentation and some interfaces, and
includes some features which primarily exist to add flavor or make things
sillier.
We use this tone because we like building software like this, and the feedback
we receive about it from users is overwhelmingly (and often effusively)
positive.
Removing Flavor
===============
Although almost all feedback about project tone that we receive is positive, a
few users don't like the tone very much (possibly including you if you've been
linked to this document). If you prefer a more straightforward tone, you can
disable most of the flavor by turning on the `phabricator.serious-business`
setting in the {nav Config} application.
(Not everything is covered by this setting. For example, it won't change the
documentation.)
If you don't like a piece of flavor because it creates legitimate difficulty or
-confusion for you or your users and makes it harder to use Phabricator, let us
+confusion for you or your users and makes it harder to use Phorge, let us
know. We don't intend flavor or tone to get in the way of usability, and can
sometimes take a joke too far (particularly for users who don't speak English
natively).
If you don't like a piece of flavor because it's a joke that you don't get or
don't find particularly funny, but it doesn't impact your ability to understand
or use the software, we're less likely to remove it. We find all our jokes very
very funny and cherish each of them dearly.
(If you're committed to removing flavor on grounds of taste, we //might// be
willing to accept changes which replace our objectively very very funny jokes
with even better ones.)
Next Steps
==========
Continue by:
- ignoring this document and complaining about a joke that you don't think is
very funny with @{article:Contributing Bug Reports}.
diff --git a/src/docs/user/userguide/unlocking.diviner b/src/docs/user/userguide/unlocking.diviner
index 456655a393..cd1157a21d 100644
--- a/src/docs/user/userguide/unlocking.diviner
+++ b/src/docs/user/userguide/unlocking.diviner
@@ -1,123 +1,123 @@
@title User Guide: Unlocking Objects
@group userguide
Explains how to access locked or invisible objects and accounts.
Overview
========
-Phabricator tries to make it difficult for users to lock themselves out of
+Phorge tries to make it difficult for users to lock themselves out of
things, but you can occasionally end up in situations where no one has access
to an object that you need access to.
For example, sometimes the only user who had edit permission for something has
left the organization, or you configured a "Phase of the Moon" policy rule and
the stars aren't currently aligned.
You can use various CLI tools to unlock objects and accounts if you need to
regain access.
Unlocking Accounts
==================
If you need to regain access to an object, the easiest approach is usually to
recover access to the account which owns it, then change the object policies
to be more open using the web UI.
For example, if an important task was accidentally locked so that only a user
who is currently on vacation can edit it, you can log in as that user and
change the edit policy to something more permissive.
To regain access to an account:
```
$ ./bin/auth recover <username>
```
If the account you're recovering access to has MFA or other session prompts,
use the `--force-full-session` flag to bypass them:
```
$ ./bin/auth recover <username> --force-full-session
```
In either case, the command will give you a one-time link you can
use to access the account from the web UI. From there, you can open up objects
or change settings.
Unlocking MFA
=============
You can completely strip MFA from a user account with:
```
$ ./bin/auth strip --user <username> ...
```
For detailed help on managing and stripping MFA, see the instructions in
@{article:User Guide: Multi-Factor Authentication}
Unlocking Objects
=================
If you aren't sure who owns an object, you can inspect the policies from the
CLI:
```
$ ./bin/policy show <object>
```
To identify the object you want to examine, you can specify an object
name (like `T123`) or a PHID as the `<object>` parameter.
If examining the policy isn't helpful, or no user account has access to an
object, you can then directly change object policies from the CLI:
```
$ ./bin/policy unlock <object> [--view ...] [--edit ...] [--owner ...]
```
Use the `--view` and `--edit` flags (and, for some objects, the `--owner`
flag) to specify new policies for the object.
For example, to make task `T123` editable by user `@alice`, run:
```
$ ./bin/policy unlock T123 --edit alice
```
Not every object has mutable view and edit policies, and not every object has
an owner, so each flag only works on some types of objects.
From here, you can log in to the web UI and change the relevant policies to
whatever you want to set them to.
No Enabled Users
================
If you accidentally disabled all administrator accounts, you can enable a
disabled account from the CLI like this:
```
$ ./bin/user enable --user <username>
```
From here, recover the account or log in normally.
No Administrators
=================
If you accidentally deleted all the administrator accounts, you can empower
a user as an administrator from the CLI like this:
```
$ ./bin/user empower --user <username>
```
This will upgrade the user account from a regular account to an administrator
account.
diff --git a/src/docs/user/userguide/users.diviner b/src/docs/user/userguide/users.diviner
index d66f8080d3..5e15f2f2af 100644
--- a/src/docs/user/userguide/users.diviner
+++ b/src/docs/user/userguide/users.diviner
@@ -1,107 +1,107 @@
@title User Guide: Account Roles
@group userguide
Describes account roles like "Administrator", "Disabled", "Bot" and "Mailing
List".
Overview
========
When you create a user account, you can set roles like "Administrator",
"Disabled", "Bot" and "Mailing List". This document explains what these roles
mean.
Administrators
==============
**Administrators** are normal users with a few extra capabilities. Their
primary role is to keep things running smoothly, and they are not all-powerful.
-In Phabricator, administrators are more like //janitors//.
+In Phorge, administrators are more like //janitors//.
Administrators can create, delete, enable, disable, and approve user accounts.
Various applications have a few other capabilities which are reserved for
administrators by default, but these can be changed to provide access to more
or fewer users.
Administrators are **not** in complete control of the system. Administrators
**can not** log in as other users or act on behalf of other users. They can not
destroy data or make changes without leaving an audit trail. Administrators also
can not bypass object privacy policies.
Limiting the power of administrators means that administrators can't abuse
their power (they have very little power to abuse), a malicious administrator
can't do much damage, and an attacker who compromises an administrator account
is limited in what they can accomplish.
Bot Accounts
============
**Bot** ("Robot") accounts are accounts for bots and scripts which need to
interface with the system, but are not regular users. Generally, when you write
scripts that use the Conduit API, you should create a bot account for them.
The **Bot** role for an account can not be changed after the account is
created. This prevents administrators from changing a normal user into a bot,
retrieving their Conduit certificate, and then changing them back (which
would allow administrators to gain other users' credentials).
**Bot** accounts differ from normal accounts in that:
- they can not log in to the web UI;
- administrators can access them, edit settings, and retrieve credentials;
- they do not receive email;
- they appear with lower precedence in the UI when selecting users, with
a "Bot" note (because it usually does not make sense to, for example,
assign a task to a bot).
Mailing Lists
=============
**Mailing List** accounts let you represent an existing external mailing list
(like a Google Group or a Mailman list) as a user. You can subscribe this user
to objects (like tasks) to send them mail.
Because these accounts are also user accounts, they can be added to projects
and affected by policies. The list won't receive mail about anything the
underlying user account can't see.
The **Mailing List** role for an account can not be changed after the account
is created.
Some options can be configured for mailing lists by browsing to the list user's
profile and clicking {nav Edit Settings}. You can change the address for a
list by editing "Email Addresses" here, choose the language and format for
email the list receives, and customize which actions the list is notified about.
**Mailing List** accounts differ from normal accounts in that:
- they can not log in;
- they can not access the Conduit API;
- administrators can access them and edit settings; and
- they appear with lower precedence in the UI when selecting users, with
  a "Mailing List" note.
Disabled Users
==============
**Disabled Users** are accounts that are no longer active. Generally, when
someone leaves a project (e.g., leaves your company, or their internship or
contract ends) you should disable their account to terminate their access to
the system. Disabled users:
- can not log in;
- can not access the Conduit API;
- do not receive email; and
- appear with lower precedence in the UI when selecting users, with a
"Disabled" note (because it usually does not make sense to, for example,
assign a task to a disabled user).
While users can also be deleted, it is strongly recommended that you disable
them instead, particularly if they interacted with any objects in the system.
If you delete a user entirely, you won't be able to find things they used to
own or restore their data later if they rejoin the project.
diff --git a/src/docs/user/userguide/utf8.diviner b/src/docs/user/userguide/utf8.diviner
index b6742f0c36..0e6727197d 100644
--- a/src/docs/user/userguide/utf8.diviner
+++ b/src/docs/user/userguide/utf8.diviner
@@ -1,38 +1,38 @@
@title User Guide: UTF-8 and Character Encoding
@group userguide
-How Phabricator handles character encodings.
+How Phorge handles character encodings.
= Overview =
-Phabricator stores all internal text data as UTF-8, processes all text data
+Phorge stores all internal text data as UTF-8, processes all text data
as UTF-8, outputs in UTF-8, and expects all inputs to be UTF-8. Principally,
this means that you should write your source code in UTF-8. In most cases this
does not require you to change anything, because ASCII text is a subset of
UTF-8.
If you have a repository with source files that do not have UTF-8, you have two
options:
- Convert all files in the repository to ASCII or UTF-8 (see "Detecting and
Repairing Files" below). This is recommended, especially if the encoding
problems are accidental.
- - Configure Phabricator to convert files into UTF-8 from whatever encoding
+ - Configure Phorge to convert files into UTF-8 from whatever encoding
your repository is in when it needs to (see "Support for Alternate
Encodings" below). This is not completely supported, and repositories with
files that have multiple encodings are not supported.
= Support for Alternate Encodings =
-Phabricator has some support for encodings other than UTF-8.
+Phorge has some support for encodings other than UTF-8.
NOTE: Alternate encodings are not completely supported, and a few features will
not work correctly. Codebases with files that have multiple different encodings
(for example, some files in ISO-8859-1 and some files in Shift-JIS) are not
supported at all.
To use an alternate encoding, edit the repository in Diffusion and specify the
encoding to use.
Optionally, you can use the `--encoding` flag when running `arc`, or set
`encoding` in your `.arcconfig`.
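For example, a minimal `.arcconfig` sketch, assuming a codebase encoded as
ISO-8859-1 (substitute your repository's actual encoding):

```lang=json
{
  "encoding": "ISO-8859-1"
}
```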
diff --git a/src/docs/user/userguide/webhooks.diviner b/src/docs/user/userguide/webhooks.diviner
index 10d1f36da0..c2d0678b26 100644
--- a/src/docs/user/userguide/webhooks.diviner
+++ b/src/docs/user/userguide/webhooks.diviner
@@ -1,223 +1,223 @@
@title User Guide: Webhooks
@group userguide
Guide to configuring webhooks.
Overview
========
-If you'd like to react to events in Phabricator or publish them into external
+If you'd like to react to events in Phorge or publish them into external
systems, you can configure webhooks.
Configure webhooks in {nav Herald > Webhooks}. Users must have the
"Can Create Webhooks" permission to create new webhooks.
Triggering Hooks
================
Webhooks can be triggered in two ways:
- Set the hook mode to **Firehose**. In this mode, your hook will be called
for every event.
- Set the hook mode to **Enabled**, then write Herald rules which use the
**Call webhooks** action to choose when the hook is called. This allows
you to choose a narrower range of events to be notified about.
Testing Hooks
=============
To test a webhook, use {nav New Test Request} from the web interface.
You can also use the command-line tool, which supports a few additional
options:
```
-phabricator/ $ ./bin/webhook call --id 42 --object D123
+phorge/ $ ./bin/webhook call --id 42 --object D123
```
Verifying Requests
==================
When your webhook callback URI receives a request, it didn't necessarily come
-from Phabricator. An attacker or mischievous user can normally call your hook
+from Phorge. An attacker or mischievous user can normally call your hook
directly and pretend to be notifying you of an event.
To verify that the request is authentic, first retrieve the webhook key from
the web UI with {nav View HMAC Key}. This is a shared secret which will let you
-verify that Phabricator originated a request.
+verify that Phorge originated a request.
When you receive a request, compute the SHA256 HMAC value of the request body
using the HMAC key as the key. The value should match the value in the
`X-Phabricator-Webhook-Signature` field.
To compute the SHA256 HMAC of a string in PHP, do this:
```lang=php
$signature = hash_hmac('sha256', $request_body, $hmac_key);
```
To compute the SHA256 HMAC of a string in Python, do this:
```lang=python
from subprocess import check_output
signature = check_output(
[
"php",
"-r",
"echo hash_hmac('sha256', $argv[1], $argv[2]);",
"--",
request_body,
hmac_key
])
```
Other languages often provide similar support.
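For instance, a minimal sketch using Python's standard `hmac` module, which
should produce the same lowercase hex digest as the PHP call above (the key
and body values below are placeholders):

```lang=python
import hashlib
import hmac

# Placeholders: use the HMAC key from the web UI and the raw POST body you received.
hmac_key = b"your-hmac-key"
request_body = b"raw request body bytes"

signature = hmac.new(hmac_key, request_body, hashlib.sha256).hexdigest()
```

Compare the computed value against the `X-Phabricator-Webhook-Signature`
header, ideally with a constant-time comparison such as `hmac.compare_digest()`.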
If you somehow disclose the key by accident, use {nav Regenerate HMAC Key} to
throw it away and generate a new one.
Request Format
==============
Webhook callbacks are POST requests with a JSON payload in the body. The
payload looks like this:
```lang=json
{
"object": {
"type": "TASK",
"phid": "PHID-TASK-abcd..."
},
"triggers": [
{
"phid": "PHID-HRUL-abcd..."
}
],
"action": {
"test": false,
"silent": false,
"secure": false,
"epoch": 12345
},
"transactions": [
{
"phid": "PHID-XACT-TASK-abcd..."
}
]
}
```
The **object** map describes the object which was edited.
The **triggers** are a list of reasons why the hook was called. When the hook
is triggered by Herald rules, the specific rules which triggered the call will
be listed. For firehose rules, the rule itself will be listed as the trigger.
For test calls, the user making the request will be listed as a trigger.
The **action** map has metadata about the action:
- `test` This was a test call from the web UI or console.
- `silent` This is a silent edit which won't send mail or notifications in
- Phabricator. If your hook is doing something like copying events into
+ Phorge. If your hook is doing something like copying events into
a chatroom, it may want to respect this flag.
- `secure` Details about this object should only be transmitted over
secure channels. Your hook may want to respect this flag.
- `epoch` The epoch timestamp when the callback was queued.
The **transactions** list contains information about the actual changes which
triggered the callback.
Responding to Requests
======================
Although trivial hooks may not need any more information than this to act, the
information conveyed in the hook body is a minimum set of pointers to relevant
data and likely insufficient for more complex hooks.
Complex hooks should expect to react to receiving a request by making API
calls to Conduit to retrieve additional information about the object and
transactions.
Hooks that are interested in reading object state should generally make a call
to a method like `maniphest.search` or `differential.revision.search` using
the PHID from the `object` field to retrieve full details about the object
state.
Hooks that are interested in changes should generally make a call to
`transaction.search`, passing the transaction PHIDs as a constraint to retrieve
details about the transactions.
For example, your call to `transaction.search` may look something like this:
```lang=json
{
"objectIdentifier": "PHID-XXXX-abcdef",
"constraints": {
"phids": [
"PHID-XACT-XXXX-11111111",
"PHID-XACT-XXXX-22222222"
]
}
}
```
The `phid.query` method can also be used to retrieve generic information about
a list of objects.
Retries and Rate Limiting
=========================
Test requests are never retried: they execute exactly once.
Live requests are automatically retried. If your endpoint does not return an
HTTP 2XX response, the request will be retried regularly until it succeeds.
Retries will continue until the request succeeds or is garbage collected. By
default, this is after 7 days.
If a webhook is disabled, outstanding queued requests will be failed
permanently. Activity which occurs while it is disabled will never be sent to
the callback URI. (Disabling a hook does not "pause" it so that it can be
"resumed" later and pick back up where it left off in the event stream.)
If a webhook encounters a significant number of errors in a short period of
time, the webhook will be paused for a few minutes before additional requests
are made. The web UI shows a warning indicator when a hook is paused because of
errors.
Hook requests time out after 10 seconds. Consider offloading response handling
to some kind of worker queue if you expect to routinely require more than 10
seconds to respond to requests.
Hook callbacks are single-threaded: you will never receive more than one
-simultaneous call to the same webhook from Phabricator. If you have a firehose
+simultaneous call to the same webhook from Phorge. If you have a firehose
hook on an active install, it may be important to respond to requests quickly
to avoid accumulating a backlog.
Callbacks may be invoked out-of-order. You should not assume that the order
you receive requests in is chronological order. If your hook is order-dependent,
you can ignore the transactions in the callback and use `transaction.search` to
retrieve a consistent list of ordered changes to the object.
Callbacks may be delayed for an arbitrarily long amount of time, up to the
garbage collection limit. You should not assume that calls are real time. If
your hook is doing something time-sensitive, you can measure the delivery delay
by comparing the current time to the `epoch` value in the `action` field and
ignoring old actions or handling them in some special way.
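For example, a short sketch of measuring the delivery delay in Python (the
request body here is a placeholder):

```lang=python
import json
import time

# Placeholder: in practice, this is the raw POST body your endpoint received.
request_body = '{"action": {"test": false, "silent": false, "secure": false, "epoch": 12345}}'

payload = json.loads(request_body)
delay_seconds = time.time() - payload["action"]["epoch"]
if delay_seconds > 300:
    # Example policy: treat actions older than five minutes as stale.
    pass
```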
Next Steps
==========
Continue by:
- learning more about Herald with @{article:Herald User Guide}; or
- interacting with the Conduit API with @{article:Conduit API Overview}.