Merge branch 'stable-3.5'

* stable-3.5:
  Revert "bazel: Use canonical reproducible load_bazlets rule"
  Fix formatting
  bazel: Use canonical reproducible load_bazlets rule
  Update bazel version to 5.3.1
  Update .bazelrc to match core
  Fix documentation for stream command
  Avoid streaming check on dropped events
  Support filtering refUpdate events for NoteDb meta refs
  Fix NoFileException during spinSubmit()
  Fix documentation for trim cmd
  Migrate build to python 3
  Fire internal listeners before external stream ones
  Test core stream-events also

Fix the change-restored filtered test to expect the correct number of
events on 3.6 where a bugfix removed no-op attention set updates.

Update bazlets to a version that contains all the commits from the
stable-3.5 branch as well as the 3.6 commit.

Also update the Dockerfile to use the latest 3.6 stable release with
event ordering fixes.

Change-Id: Id144d2ddec331e584fc00ebbe28072cf2f121d58
diff --git a/.bazelrc b/.bazelrc
index 3ae03ff..fb8e4d1 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -1,2 +1,18 @@
-build --workspace_status_command="python ./tools/workspace_status.py"
+build --java_language_version=11
+build --java_runtime_version=remotejdk_11
+build --tool_java_language_version=11
+build --tool_java_runtime_version=remotejdk_11
+
+build --workspace_status_command="python3 ./tools/workspace_status.py"
+build --repository_cache=~/.gerritcodereview/bazel-cache/repository
+build --action_env=PATH
+build --disk_cache=~/.gerritcodereview/bazel-cache/cas
+
+# Enable the strict_action_env flag. For more information on this feature see
+# https://groups.google.com/forum/#!topic/bazel-discuss/_VmRfMyyHBk.
+# This will be the new default behavior at some point (and the flag was flipped
+# shortly in 0.21.0 - https://github.com/bazelbuild/bazel/issues/7026). Remove
+# this flag here once flipped in Bazel again.
+build --incompatible_strict_action_env
+
 test --build_tests_only
diff --git a/.bazelversion b/.bazelversion
index 7c69a55..c7cb131 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-3.7.0
+5.3.1
diff --git a/WORKSPACE b/WORKSPACE
index 78c73b8..989ddb9 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -3,7 +3,7 @@
 load("//:bazlets.bzl", "load_bazlets")
 
 load_bazlets(
-    commit = "e68cc7a45d9ee2b100024b9b12533b50a4598585",
+    commit = "ea5aa9f256fe94577accb074f1f51e12af106708",
 )
 
 load(
diff --git a/src/main/java/com/googlesource/gerrit/plugins/events/FileSystemEventBroker.java b/src/main/java/com/googlesource/gerrit/plugins/events/FileSystemEventBroker.java
index f3f5855..180e5cb 100644
--- a/src/main/java/com/googlesource/gerrit/plugins/events/FileSystemEventBroker.java
+++ b/src/main/java/com/googlesource/gerrit/plugins/events/FileSystemEventBroker.java
@@ -14,11 +14,14 @@
 
 package com.googlesource.gerrit.plugins.events;
 
+import com.google.gerrit.common.Nullable;
 import com.google.gerrit.entities.BranchNameKey;
 import com.google.gerrit.entities.Change;
 import com.google.gerrit.entities.Project;
+import com.google.gerrit.entities.RefNames;
 import com.google.gerrit.extensions.annotations.PluginName;
 import com.google.gerrit.extensions.registration.DynamicSet;
+import com.google.gerrit.server.CurrentUser;
 import com.google.gerrit.server.config.GerritInstanceId;
 import com.google.gerrit.server.config.GerritServerConfigProvider;
 import com.google.gerrit.server.config.PluginConfig;
@@ -34,27 +37,44 @@
 import com.google.gerrit.server.permissions.PermissionBackend;
 import com.google.gerrit.server.permissions.PermissionBackendException;
 import com.google.gerrit.server.plugincontext.PluginSetContext;
+import com.google.gerrit.server.plugincontext.PluginSetEntryContext;
 import com.google.gerrit.server.project.ProjectCache;
 import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
-import javax.annotation.Nullable;
+import java.util.function.Predicate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @Singleton
 public class FileSystemEventBroker extends EventBroker {
   private static final Logger log = LoggerFactory.getLogger(FileSystemEventBroker.class);
-  protected final EventStore store;
-  protected final Gson gson;
-  protected final DynamicSet<StreamEventListener> streamEventListeners;
-  protected volatile long lastSent;
+
+  protected static final Predicate<Event> IS_NOTEDB_METAREF =
+      event -> {
+        if (event instanceof RefEvent) {
+          return RefNames.isNoteDbMetaRef(((RefEvent) event).getRefName());
+        }
+        return false;
+      };
+
   protected static final String KEY_FILTER = "filter";
   protected static final String FILTER_TYPE_DROP = "DROP";
   protected static final String FILTER_ELEMENT_CLASSNAME = "classname";
+  protected static final String FILTER_ELEMENT_EVENT_REFUPDATED = "RefUpdatedEvent";
+  protected static final String FILTER_TEST_IS_NOTEDB_METAREF = "isNoteDbMetaRef";
+
+  protected final EventStore store;
+  protected final Gson gson;
+  protected final DynamicSet<StreamEventListener> streamEventListeners;
+
+  protected long lastSent;
+  protected Predicate<Event> drop = e -> false;
   protected Set<String> dropEventNames = new HashSet<>();
 
   @Inject
@@ -87,71 +107,176 @@
 
   @Override
   public void postEvent(Change change, ChangeEvent event) throws PermissionBackendException {
-    storeEvent(event);
-    sendAllPendingEvents();
+    Drop drop = storeEvent(event);
+    super.postEvent(change, event);
+    fireEventForStreamListeners(drop);
   }
 
   @Override
   public void postEvent(Project.NameKey projectName, ProjectEvent event) {
-    storeEvent(event);
+    Drop drop = storeEvent(event);
+    super.postEvent(projectName, event);
     try {
-      sendAllPendingEvents();
+      fireEventForStreamListeners(drop);
     } catch (PermissionBackendException e) {
       log.error("Permission Exception while dispatching the event. Will be tried again.", e);
     }
   }
 
   @Override
-  protected void fireEvent(BranchNameKey branchName, RefEvent event)
+  public void postEvent(BranchNameKey branchName, RefEvent event)
       throws PermissionBackendException {
-    storeEvent(event);
-    sendAllPendingEvents();
+    Drop drop = storeEvent(event);
+    super.postEvent(branchName, event);
+    fireEventForStreamListeners(drop);
   }
 
   @Override
   public void postEvent(Event event) throws PermissionBackendException {
-    storeEvent(event);
-    sendAllPendingEvents();
+    Drop drop = storeEvent(event);
+    super.postEvent(event);
+    fireEventForStreamListeners(drop);
   }
 
-  protected void storeEvent(Event event) {
-    if (dropEventNames.contains(event.getClass().getName())) {
-      return;
-    }
-    try {
-      store.add(gson.toJson(event));
-    } catch (IOException ex) {
-      log.error("Cannot add event to event store", ex);
-    }
-  }
-
-  public synchronized void sendAllPendingEvents() throws PermissionBackendException {
-    try {
-      long current = store.getHead();
-      while (lastSent < current) {
-        long next = lastSent + 1;
-        fireEvent(gson.fromJson(store.get(next), Event.class));
-        lastSent = next;
+  protected Drop storeEvent(Event event) {
+    if (!isDropEvent(event)) {
+      try {
+        store.add(gson.toJson(event));
+        return Drop.FALSE;
+      } catch (IOException ex) {
+        log.error("Cannot add event to event store", ex);
       }
-    } catch (IOException e) {
-      // Next Event would re-try the events.
     }
-    for (StreamEventListener l : streamEventListeners) {
-      l.onStreamEventUpdate();
+    return Drop.TRUE;
+  }
+
+  protected boolean isDropEvent(Event event) {
+    if (drop.test(event) || dropEventNames.contains(event.getClass().getName())) {
+      return true;
     }
+    return false;
+  }
+
+  public void fireEventForStreamListeners() throws PermissionBackendException {
+    fireEventForStreamListeners(Drop.FALSE);
+  }
+
+  protected synchronized void fireEventForStreamListeners(Drop drop)
+      throws PermissionBackendException {
+    if (!Drop.TRUE.equals(drop)) {
+      try {
+        long current = store.getHead();
+        while (lastSent < current) {
+          long next = lastSent + 1;
+          fireEventForUserScopedEventListener(
+              Type.STREAM, gson.fromJson(store.get(next), Event.class));
+          lastSent = next;
+        }
+      } catch (IOException e) {
+        // Next Event would re-try the events.
+      }
+      for (StreamEventListener l : streamEventListeners) {
+        l.onStreamEventUpdate();
+      }
+    }
+  }
+
+  @Override
+  protected void fireEvent(Change change, ChangeEvent event) throws PermissionBackendException {
+    setInstanceIdWhenEmpty(event);
+    for (PluginSetEntryContext<UserScopedEventListener> c : getListeners(Type.NON_STREAM)) {
+      CurrentUser user = c.call(UserScopedEventListener::getUser);
+      if (isVisibleTo(change, user)) {
+        c.run(l -> l.onEvent(event));
+      }
+    }
+    fireEventForUnrestrictedListeners(event);
+  }
+
+  @Override
+  protected void fireEvent(Project.NameKey project, ProjectEvent event) {
+    setInstanceIdWhenEmpty(event);
+    for (PluginSetEntryContext<UserScopedEventListener> c : getListeners(Type.NON_STREAM)) {
+      CurrentUser user = c.call(UserScopedEventListener::getUser);
+      if (isVisibleTo(project, user)) {
+        c.run(l -> l.onEvent(event));
+      }
+    }
+    fireEventForUnrestrictedListeners(event);
+  }
+
+  @Override
+  protected void fireEvent(BranchNameKey branchName, RefEvent event)
+      throws PermissionBackendException {
+    setInstanceIdWhenEmpty(event);
+    for (PluginSetEntryContext<UserScopedEventListener> c : getListeners(Type.NON_STREAM)) {
+      CurrentUser user = c.call(UserScopedEventListener::getUser);
+      if (isVisibleTo(branchName, user)) {
+        c.run(l -> l.onEvent(event));
+      }
+    }
+    fireEventForUnrestrictedListeners(event);
+  }
+
+  @Override
+  protected void fireEvent(Event event) throws PermissionBackendException {
+    setInstanceIdWhenEmpty(event);
+    fireEventForUserScopedEventListener(Type.NON_STREAM, event);
+    fireEventForUnrestrictedListeners(event);
+  }
+
+  protected void fireEventForUserScopedEventListener(Type type, Event event)
+      throws PermissionBackendException {
+    for (PluginSetEntryContext<UserScopedEventListener> c : getListeners(type)) {
+      CurrentUser user = c.call(UserScopedEventListener::getUser);
+      if (isVisibleTo(event, user)) {
+        c.run(l -> l.onEvent(event));
+      }
+    }
+  }
+
+  protected enum Type {
+    STREAM,
+    NON_STREAM
+  }
+
+  protected enum Drop {
+    TRUE,
+    FALSE
+  }
+
+  protected List<PluginSetEntryContext<UserScopedEventListener>> getListeners(Type type) {
+    List<PluginSetEntryContext<UserScopedEventListener>> filteredListeners = new ArrayList<>();
+    for (PluginSetEntryContext<UserScopedEventListener> c : listeners) {
+      if ((type == Type.STREAM) == isStreamListener(c.get())) {
+        filteredListeners.add(c);
+      }
+    }
+    return filteredListeners;
+  }
+
+  protected boolean isStreamListener(UserScopedEventListener l) {
+    return l.getClass().getName().startsWith("com.google.gerrit.sshd.commands.StreamEvents");
   }
 
   protected void readAndParseCfg(String pluginName, GerritServerConfigProvider configProvider) {
     PluginConfig cfg = PluginConfig.createFromGerritConfig(pluginName, configProvider.loadConfig());
     for (String filter : cfg.getStringList(KEY_FILTER)) {
       String pieces[] = filter.split(" ");
-      if (pieces.length == 3
-          && FILTER_TYPE_DROP.equals(pieces[0])
-          && FILTER_ELEMENT_CLASSNAME.equals(pieces[1])) {
-        dropEventNames.add(pieces[2]);
-      } else {
-        log.error("Ignoring invalid filter: " + filter);
+      if (pieces.length == 3) {
+        if (FILTER_TYPE_DROP.equals(pieces[0])) {
+          if (FILTER_ELEMENT_CLASSNAME.equals(pieces[1])) {
+            dropEventNames.add(pieces[2]);
+            continue;
+          }
+          if (FILTER_ELEMENT_EVENT_REFUPDATED.equals(pieces[1])
+              && FILTER_TEST_IS_NOTEDB_METAREF.equals(pieces[2])) {
+            drop = IS_NOTEDB_METAREF;
+            continue;
+          }
+        }
       }
+      log.error("Ignoring invalid filter: " + filter);
     }
   }
 }
diff --git a/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/FsListener.java b/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/FsListener.java
index 7327700..3ad34c8 100644
--- a/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/FsListener.java
+++ b/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/FsListener.java
@@ -80,7 +80,7 @@
   @Override
   public void run() {
     try {
-      broker.sendAllPendingEvents();
+      broker.fireEventForStreamListeners();
     } catch (PermissionBackendException e) {
       // Ignore
     }
diff --git a/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/UpdatableFileValue.java b/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/UpdatableFileValue.java
index 67d51b0..0f7d3b5 100644
--- a/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/UpdatableFileValue.java
+++ b/src/main/java/com/googlesource/gerrit/plugins/events/fsstore/UpdatableFileValue.java
@@ -16,6 +16,7 @@
 
 import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.attribute.FileTime;
@@ -290,7 +291,11 @@
 
     // Maximum delay incurred due to a server crash.
     FileTime expiry = Fs.getFileTimeAgo(10, TimeUnit.SECONDS);
-    return Nfs.isAllEntriesOlderThan(paths.update, expiry);
+    try {
+      return Nfs.isAllEntriesOlderThan(paths.update, expiry);
+    } catch (NoSuchFileException e) {
+      return false;
+    }
   }
 
   protected abstract UniqueUpdate<T> createUniqueUpdate(String uuid, boolean ours, long maxTries)
diff --git a/src/main/resources/Documentation/about.md b/src/main/resources/Documentation/about.md
index f4d8079..c34eea7 100644
--- a/src/main/resources/Documentation/about.md
+++ b/src/main/resources/Documentation/about.md
@@ -27,6 +27,10 @@
 
  DROP classname fully.qualified.java.ClassName
 
+or:
+
+ DROP RefUpdatedEvent isNoteDbMetaRef
+
 If the `plugin.@PLUGIN@.filter` key is specified more than once it
 will cause events matching any of the rules to be dropped.
 
diff --git a/src/main/resources/Documentation/cmd-stream.md b/src/main/resources/Documentation/cmd-stream.md
index 2e9b7ad..74e6a79 100644
--- a/src/main/resources/Documentation/cmd-stream.md
+++ b/src/main/resources/Documentation/cmd-stream.md
@@ -54,14 +54,14 @@
 --------
 
 ```
-    $ ssh -p 29418 review.example.com @PLUGIN@ stream-events --ids
+    $ ssh -p 29418 review.example.com @PLUGIN@ stream --ids
     {"type":"comment-added", ..., "id":"bdedff7d-34fd-4459-a6a7-3f738f32c01d:1"}
     {"type":"change-merged", ..., "id":"bdedff7d-34fd-4459-a6a7-3f738f32c01d:2"}
 
 ```
 
 ```
-    $ ssh -p 29418 review.example.com @PLUGIN@ stream-events --ids \
+    $ ssh -p 29418 review.example.com @PLUGIN@ stream --ids \
         --resume-after bdedff7d-34fd-4459-a6a7-3f738f32c01d:1
     {"type":"change-merged", ..., "id":"bdedff7d-34fd-4459-a6a7-3f738f32c01d:2"}
     {"type":"change-abandoned", ..., "id":"bdedff7d-34fd-4459-a6a7-3f738f32c01d:3"}
diff --git a/src/main/resources/Documentation/cmd-trim.md b/src/main/resources/Documentation/cmd-trim.md
index 35ab609..f092b29 100644
--- a/src/main/resources/Documentation/cmd-trim.md
+++ b/src/main/resources/Documentation/cmd-trim.md
@@ -9,7 +9,7 @@
 --------
 ```
 ssh -p @SSH_PORT@ @SSH_HOST@ @PLUGIN@ trim
-   [--id] <TRIM_ID>
+   [--trim-id] <TRIM_ID>
    [--size] <SIZE>
 ```
 
diff --git a/test/docker/gerrit/Dockerfile b/test/docker/gerrit/Dockerfile
index 86e21dc..127b922 100755
--- a/test/docker/gerrit/Dockerfile
+++ b/test/docker/gerrit/Dockerfile
@@ -1,4 +1,4 @@
-FROM gerritcodereview/gerrit:3.6.1-ubuntu20
+FROM gerritcodereview/gerrit:3.6.8-ubuntu20
 
 ENV GERRIT_SITE /var/gerrit
 COPY artifacts/plugins/ $GERRIT_SITE/plugins/
diff --git a/test/test_events_plugin.sh b/test/test_events_plugin.sh
index f8ba2f9..406b351 100755
--- a/test/test_events_plugin.sh
+++ b/test/test_events_plugin.sh
@@ -17,7 +17,8 @@
 }
 
 cleanup() {
-    wait_event
+    wait_event_for plugin
+    wait_event_for core
     (kill_diff_captures ; sleep 1 ; kill_diff_captures -9 ) &
 }
 
@@ -106,6 +107,13 @@
         --data '{"message":"unmark_private"}' "$REST_API_CHANGES_URL/$1/private"
 }
 
+
+add_meta_ref_updates() { # num_events num_meta_ref_updates > total_num_events
+    local n=$1 m=$2
+    [ "$FILTERED" = "META_REF_UPDATES" ] && { echo "$n" ; return ; }
+    echo $(($n + $m))
+}
+
 # ------------------------- Event Capturing ---------------------------
 
 kill_diff_captures() { # sig
@@ -122,44 +130,122 @@
     CAPTURE_PIDS=("${CAPTURE_PIDS[@]}" $!)
 }
 
-capture_events() { # count
-    local count=$1
+capture_events_for() { # 'plugin'|'core' count
+    local for=$1 count=$2 cmd
     [ -n "$count" ] || count=1
 
     # Re-create the fifo to ensure that is is empty
-    rm -f -- "$EVENT_FIFO"
-    mkfifo -- "$EVENT_FIFO"
+    rm -f -- "$EVENT_FIFO.$for"
+    mkfifo -- "$EVENT_FIFO.$for"
 
-    head -n $count < "$EVENT_FIFO" > "$EVENTS" &
-    CAPTURE_PID_HEAD=$!
+    head -n $count < "$EVENT_FIFO.$for" > "$EVENTS.$for" &
+    CAPTURE_PID_HEAD[$for]=$!
     sleep 1
-    ssh -p 29418 -x "$SERVER" "${PLUGIN_CMD[@]}" > "$EVENT_FIFO" &
-    CAPTURE_PID_SSH=$!
+    case "$1" in
+        plugin) cmd=("${PLUGIN_CMD[@]}") ;;
+        core)   cmd=("${CORE_CMD[@]}") ;;
+        *) echo "Unkown type: $for (should be plugin|core)" >&2 ; exit 1 ;;
+    esac
+    ssh -p 29418 -x "$SERVER" "${cmd[@]}" > "$EVENT_FIFO.$for" &
+    CAPTURE_PID_SSH[$for]=$!
     sleep 1
 }
 
-wait_event() {
+capture_events() { # count
+    capture_events_for plugin "$@"
+    capture_events_for core "$@"
+}
+
+wait_event_for() { # 'plugin'|'core'
    # Below kill of CAPTURE_PID_HEAD is a safety net and ideally we wouldn't
    # want this kill to stop CAPTURE_PID_HEAD, rather we want it die on its
    # own when the 'head' in capture_events() captures the desired events. The
    # delay here must ideally be greater than the run time of the entire suite.
-   (sleep 120 ; q kill -9 $CAPTURE_PID_HEAD ; ) &
-   q wait $CAPTURE_PID_HEAD
-   q kill -9 $CAPTURE_PID_SSH
-   q wait $CAPTURE_PID_SSH
+   (sleep 120 ; q kill -9 ${CAPTURE_PID_HEAD[$1]} ; ) &
+   q wait ${CAPTURE_PID_HEAD[$1]}
+   q kill -9 ${CAPTURE_PID_SSH[$1]}
+   q wait ${CAPTURE_PID_SSH[$1]}
 }
 
-
-result_event() { # test type [expected_count]
-    local test=$1 type=$2 expected_count=$3
+result_event_for() { # 'plugin'|'core' test type [expected_count]
+    local for=$1 test=$2 type=$3 expected_count=$4
     [ -n "$expected_count" ] || expected_count=1
-    wait_event
-    local actual_count=$(grep -c "\"type\":\"$type\"" "$EVENTS")
-    result_out "$test" "$expected_count $type event(s)" "$actual_count $type event(s)"
+    wait_event_for "$for"
+    local actual_count=$(grep -c "\"type\":\"$type\"" "$EVENTS.$for")
+    result_out "$test $for" "$expected_count $type event(s)" "$actual_count $type event(s)"
+    [ "$expected_count" = "$actual_count" ] || cat "$EVENTS.$for"
 }
 
+# 'plugin'|'core' test type [expected_count]
+result_type_for() { result_event_for "$1" "$2 $3" "$3" "$4" ; }
+
 # test type [expected_count]
-result_type() { result_event "$1 $2" "$2" "$3" ; }
+result_type() {
+    result_type_for plugin "$@"
+    result_type_for core "$@"
+}
+
+# ------------------------- Tests ---------------------------
+
+main_suite() { # group_name
+    GROUP=$1
+    setup_diff_captures
+
+    type=patchset-created
+    capture_events $(add_meta_ref_updates 2 1)
+    ch1=$(create_change "$REF_BRANCH" "$FILE_A") || exit
+    result_type "$GROUP" "$type" 1
+    # The change ref and its meta ref are expected to be updated
+    # For example: 'refs/changes/01/1001/1' and 'refs/changes/01/1001/meta'
+    result_type "$GROUP $type" "ref-updated" $(add_meta_ref_updates 1 1)
+
+    type=change-abandoned
+    capture_events $(add_meta_ref_updates 1 1)
+    review "$ch1,1" --abandon
+    result_type "$GROUP" "$type"
+
+    type=change-restored
+    capture_events $(add_meta_ref_updates 1 1)
+    review "$ch1,1" --restore
+    result_type "$GROUP" "$type"
+
+    type=comment-added
+    capture_events $(add_meta_ref_updates 1 1)
+    review "$ch1,1" --message "my_comment" $APPROVALS
+    result_type "$GROUP" "$type"
+
+    type=wip-state-changed
+    capture_events $(add_meta_ref_updates 1 3)
+    q mark_change_wip "$ch1"
+    q mark_change_ready "$ch1"
+    result_type "$GROUP" "$type" $(add_meta_ref_updates 1 1)
+
+    type=private-state-changed
+    capture_events $(add_meta_ref_updates 1 3)
+    q mark_change_private "$ch1"
+    q unmark_change_private "$ch1"
+    result_type "$GROUP" "$type" $(add_meta_ref_updates 1 1)
+
+    type=change-merged
+    events_count=$(add_meta_ref_updates 2 2)
+    # If reviewnotes plugin is installed, an extra event of type 'ref-updated'
+    # on 'refs/notes/review' is fired when a change is merged.
+    is_plugin_installed reviewnotes && events_count="$((events_count+1))"
+    capture_events "$events_count"
+    submit "$ch1,1"
+    result_type "$GROUP" "$type"
+    # The destination ref of the change, its meta ref and notes ref(if reviewnotes
+    # plugin is installed) are expected to be updated.
+    # For example: 'refs/heads/master', 'refs/changes/01/1001/meta' and 'refs/notes/review'
+    result_type "$GROUP $type" "ref-updated" "$((events_count-1))"
+
+    # reviewer-added needs to be tested via Rest-API
+
+    out=$(diff -- "$EVENTS_CORE" "$EVENTS_PLUGIN")
+    result "$GROUP core/plugin diff" "$out"
+
+    kill_diff_captures
+}
 
 # ------------------------- Usage ---------------------------
 
@@ -237,6 +323,8 @@
 EVENT_FIFO=$TEST_DIR/event-fifo
 EVENTS=$TEST_DIR/events
 GERRIT_CFG="/gerrit.config/gerrit.config"
+declare -A CAPTURE_PID_HEAD
+declare -A CAPTURE_PID_SSH
 
 trap cleanup EXIT
 
@@ -247,83 +335,35 @@
 RESULT=0
 
 # ------------------------- Individual Event Tests ---------------------------
-GROUP=visible-events
+FILTERED=""
 set_filter_rules # No rules
-setup_diff_captures
-
-type=patchset-created
-capture_events 3
-ch1=$(create_change "$REF_BRANCH" "$FILE_A") || exit
-result_type "$GROUP" "$type" 1
-# The change ref and its meta ref are expected to be updated
-# For example: 'refs/changes/01/1001/1' and 'refs/changes/01/1001/meta'
-result_type "$GROUP $type" "ref-updated" 2
-
-type=change-abandoned
-capture_events 2
-review "$ch1,1" --abandon
-result_type "$GROUP" "$type"
-
-type=change-restored
-capture_events 2
-review "$ch1,1" --restore
-result_type "$GROUP" "$type"
-
-type=comment-added
-capture_events 2
-review "$ch1,1" --message "my_comment" $APPROVALS
-result_type "$GROUP" "$type"
-
-type=wip-state-changed
-capture_events 4
-q mark_change_wip "$ch1"
-q mark_change_ready "$ch1"
-result_type "$GROUP" "$type" 2
-
-type=private-state-changed
-capture_events 4
-q mark_change_private "$ch1"
-q unmark_change_private "$ch1"
-result_type "$GROUP" "$type" 2
-
-type=change-merged
-events_count=4
-# If reviewnotes plugin is installed, an extra event of type 'ref-updated'
-# on 'refs/notes/review' is fired when a change is merged.
-is_plugin_installed reviewnotes && events_count="$((events_count+1))"
-capture_events "$events_count"
-submit "$ch1,1"
-result_type "$GROUP" "$type"
-# The destination ref of the change, its meta ref and notes ref(if reviewnotes
-# plugin is installed) are expected to be updated.
-# For example: 'refs/heads/master', 'refs/changes/01/1001/meta' and 'refs/notes/review'
-result_type "$GROUP $type" "ref-updated" "$((events_count-1))"
-
-# reviewer-added needs to be tested via Rest-API
-
-out=$(diff -- "$EVENTS_CORE" "$EVENTS_PLUGIN")
-result "$GROUP core/plugin diff" "$out"
-
-kill_diff_captures
+main_suite visible-events
 
 # ------------------------- Filtering -------------------------
 
+FILTERED="META_REF_UPDATES"
+set_filter_rules 'DROP RefUpdatedEvent isNoteDbMetaRef'
+main_suite meta-refUpdated-filtered
+
+
+FILTERED=""
 GROUP=restored-filtered
 set_filter_rules 'DROP classname com.google.gerrit.server.events.ChangeRestoredEvent'
 
 ch1=$(create_change "$REF_BRANCH" "$FILE_A") || exit
 type=change-abandoned
-
-capture_events 2
+capture_events $(add_meta_ref_updates 1 1)
 review "$ch1,1" --abandon
 result_type "$GROUP" "$type"
 
 type=change-restored
-capture_events 3
+capture_events $(add_meta_ref_updates 1 2)
 review "$ch1,1" --restore
 # Instead of timing out waiting for the filtered change-restored event,
 # create follow-on events and capture them to trigger completion.
 review "$ch1,1" --message "'\"trigger filtered completion\"'"
 result_type "$GROUP" "$type" 0
+result_type "$GROUP" "comment-added" 1
+result_type "$GROUP" "ref-updated" 2
 
 exit $RESULT
diff --git a/tools/workspace_status.py b/tools/workspace_status.py
index fa4f1e9..067d262 100644
--- a/tools/workspace_status.py
+++ b/tools/workspace_status.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # This script will be run by bazel when the build process starts to
 # generate key-value information that represents the status of the